/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2019 Joyent, Inc.
 */

/*
 * Debugger entry and exit for both master and slave CPUs. kdi_idthdl.s contains
 * the IDT stubs that drop into here (mainly via kdi_cmnint).
 */

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	/*CSTYLED*/				\
	addq	(%rdx), %rax
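
/*
 * In C terms, the macro above computes roughly (a sketch, assuming
 * kdi_cpusave points at an array of KRS_SIZE-byte per-CPU save areas):
 *
 *	addr = kdi_cpusave + cpuid * KRS_SIZE;
 */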

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movq	%gs:CPU_IDT, %r11;		\
	leaq	kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:

#ifdef __xpv

/*
 * Already on kernel gsbase via the hypervisor.
 */
#define	SAVE_GSBASE(reg) /* nothing */
#define	RESTORE_GSBASE(reg) /* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base);	\
	movl	$MSR_AMD_KGSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_KGSBASE)(base)
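
/*
 * rdmsr returns the 64-bit MSR value split across %edx:%eax (high:low),
 * which is why each read above is reassembled as, in effect:
 *
 *	value = ((uint64_t)edx << 32) | eax;
 */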

/*
 * We shouldn't have stomped on KGSBASE, so don't try to restore it.
 */
#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr
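
/*
 * wrmsr expects the same %edx:%eax (high:low) split going in, which is why
 * RESTORE_GSBASE above copies the saved value into %rax and shifts the high
 * half into %edx before the write.
 */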

#endif /* __xpv */

/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.
 */
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	movq	%cr2, %rax;				\
	movq	%rax, REG_OFF(KDIREG_CR2)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)

#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_CR2)(base), %rax;	\
	movq	%rax, %cr2;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi
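
/*
 * Note that %rdi doubles as the base pointer during the restore, so it is
 * deliberately the last register reloaded.
 */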

/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global kdi_drreg variables.
 *
 * %rdi is preserved across the macro.
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg(%rip), %r15;				\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi
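
/*
 * Per the SysV AMD64 calling convention, each kdi_dreg_set() call above
 * passes the debug register number in %edi and the new value in %rsi; in C
 * terms it is used roughly as kdi_dreg_set(int dreg, uintptr_t value) (a
 * sketch of the convention, not necessarily the declared prototype).
 */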

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addq	$1, tmp1;			\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movq	KRS_CURCRUMB(cpusave), tmp1;	\
	addq	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movq	$0, KRS_CURCRUMBIDX(cpusave);	\
	leaq	KRS_CRUMBS(cpusave), tmp1;	\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movq	$KDI_NCRUMBS, tmp2;		\
3:	movq	$0, -4(tmp1, tmp2, 4);		\
	decq	tmp2;				\
	jnz	3b
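
/*
 * Roughly, in C (a sketch; field names are inferred from the KRS_* offsets
 * used above):
 *
 *	if (cs->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		cs->krs_curcrumbidx++;
 *		cs->krs_curcrumb++;
 *	} else {
 *		cs->krs_curcrumbidx = 0;
 *		cs->krs_curcrumb = cs->krs_crumbs;
 *	}
 *	bzero(cs->krs_curcrumb, sizeof (*cs->krs_curcrumb));
 */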

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)

	/* XXX implement me */
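	/*
	 * For now this stub deliberately loads through a NULL pointer,
	 * presumably so that an unexpected NMI faults conspicuously rather
	 * than being silently ignored.
	 */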
	ENTRY_NP(kdi_nmiint)
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)

	/*
	 * The main entry point for master CPUs.  It also serves as the trap
	 * handler for all traps and interrupts taken during single-step.
	 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

#ifdef __xpv
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif

#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the correct
	 * value for us.  Note that the previous GSBASE is saved in the
	 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
	 * blown away.  On the hypervisor, we don't need to do this, since it's
	 * ensured we're on our requested kernel GSBASE already.
	 *
	 * No need to worry about swapgs speculation here as it's unconditional
	 * and via wrmsr anyway.
	 */
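	/*
	 * sgdt stores a 10-byte pseudo-descriptor: a 2-byte limit followed
	 * by the 8-byte linear base address.  That is why we carve exactly
	 * 10 bytes from the stack and read the base from offset 2.
	 */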
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr

	/*
	 * In the trampoline we stashed the incoming %cr3. Copy this into
	 * the kdiregs for restoration and later use.
	 */
	mov	%gs:(CPU_KPTI_DBG+KPTI_TR_CR3), %rdx
	mov	%rdx, REG_OFF(KDIREG_CR3)(%rsp)
	/*
	 * Switch to the kernel's %cr3. From the early interrupt handler
	 * until now we've been running on the "paranoid" %cr3 (that of kas
	 * from early in boot).
	 *
	 * If we took the interrupt from somewhere already on the kas/paranoid
	 * %cr3 though, don't change it (this could happen if kcr3 is corrupt
	 * and we took a gptrap earlier from this very code).
	 */
	cmpq	%rdx, kpti_safe_cr3
	je	.no_kcr3
	mov	%gs:CPU_KPTI_KCR3, %rdx
	cmpq	$0, %rdx
	je	.no_kcr3
	mov	%rdx, %cr3
.no_kcr3:

#endif	/* __xpv */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
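	/*
	 * Roughly, in C (a sketch; field names are inferred from the MR_*
	 * offsets, with kdi_memranges an array of kdi_nmemranges entries):
	 *
	 *	for (i = 0; i < kdi_nmemranges; i++) {
	 *		if (rsp >= kdi_memranges[i].mr_base &&
	 *		    rsp <= kdi_memranges[i].mr_lim)
	 *			goto in_debugger;
	 *	}
	 *	goto kdi_save_common_state;
	 */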
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:
	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:
	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b

3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

	ENTRY_NP(kdi_slave_entry)

	/*
	 * Cross calls are implemented as function calls, so our stack currently
	 * looks like one you'd get from a zero-argument function call.  That
	 * is, there's the return %rip at %rsp, and that's about it.  We need
	 * to make it look like an interrupt stack.  When we first save, we'll
	 * reverse the saved %ss and %rip, which we'll fix back up when we've
	 * freed up some general-purpose registers.  We'll also need to fix up
	 * the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	movq	%cr3, %rax
	movq	%rax, REG_OFF(KDIREG_CR3)(%rsp)

	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SAVPC)(%rsp)
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)
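	/*
	 * The %rsp we pushed on entry pointed at the cross-call's return
	 * address, one word below the stack pointer the interrupted context
	 * actually had; the addq $8 above corrects for that.
	 */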

	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	movq	REG_OFF(KDIREG_RSP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_SP, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_TRAPNO, $-1, %rdx)

	movq	$KDI_CPU_STATE_SLAVE, KRS_CPU_STATE(%rax)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */

	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	testq	%rax, %rax
	jnz	kdi_pass_to_kernel
	popq	%rax /* cpusave in %rax */

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect so we can write to
	   protected kernel text (e.g. to plant breakpoints) */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif

	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax	/* restore cpu save area to rax */

	clrq	%rbp		/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi	/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp

#if !defined(__xpv)
	/*
	 * If we're going back via tr_iret_kdi, then we want to copy the
	 * final %cr3 we're going to use back into the kpti_dbg area now.
	 *
	 * Since the trampoline needs to find the kpti_dbg too, we enter it
	 * with %r13 set to point at that. The real %r13 (to restore before
	 * the iret) we stash in the kpti_dbg itself.
	 */
	movq	%gs:CPU_SELF, %r13	/* can't leaq %gs:*, use self-ptr */
	addq	$CPU_KPTI_DBG, %r13

	movq	REG_OFF(KDIREG_R13)(%rsp), %rdx
	movq	%rdx, KPTI_R13(%r13)

	movq	REG_OFF(KDIREG_CR3)(%rsp), %rdx
	movq	%rdx, KPTI_TR_CR3(%r13)

	/* The trampoline will undo this later. */
	movq	%r13, REG_OFF(KDIREG_R13)(%rsp)
#endif

	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	/*
	 * The common trampoline code will restore %cr3 to the right value
	 * for either kernel or userland.
	 */
#if !defined(__xpv)
	jmp	tr_iret_kdi
#else
	IRET
#endif
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)


	/*
	 * We took a trap that should be handled by the kernel, not KMDB.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 *
	 * The trap handler will expect the stack to be in trap order, with %rip
	 * being the last entry, so we'll need to restore all our regs.  On
	 * i86xpv we'll need to compensate for XPV_TRAP_POP.
	 *
	 * %rax on entry is either 1 or 2, which is from kdi_trap_pass().
	 * kdi_cmnint stashed the original %cr3 into KDIREG_CR3, then (probably)
	 * switched us to the CPU's kf_kernel_cr3. But we're about to call, for
	 * example:
	 *
	 * dbgtrap->trap()->tr_iret_kernel
	 *
	 * which, unlike tr_iret_kdi, doesn't restore the original %cr3, so
	 * we'll do so here if needed.
	 *
	 * This isn't just a matter of tidiness: for example, consider:
	 *
	 * hat_switch(oldhat=kas.a_hat, newhat=prochat)
	 *  setcr3()
	 *  reset_kpti()
	 *   *brktrap* due to fbt on reset_kpti:entry
	 *
	 * Here, we have the new hat's %cr3, but we haven't yet updated
	 * kf_kernel_cr3 (so it's currently kas's). So if we don't restore here,
	 * we'll stay on kas's cr3 value on returning from the trap: not good if
	 * we fault on a userspace address.
	 */
	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi /* cpusave */
	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)
	movq	KRS_GREGS(%rdi), %rsp

	cmpq	$2, %rax
	jne	no_restore_cr3
	movq	REG_OFF(KDIREG_CR3)(%rsp), %r11
	movq	%r11, %cr3

no_restore_cr3:
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi

	cmpq	$T_SGLSTP, %rdi
	je	kdi_pass_dbgtrap
	cmpq	$T_BPTFLT, %rdi
	je	kdi_pass_brktrap
	cmpq	$T_DBGENTR, %rdi
	je	kdi_pass_invaltrap
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR

#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp); \
	/* Discard state, trapno, err */ \
	addq	$REG_OFF(KDIREG_RIP), %rsp; \
	XPV_TRAP_PUSH; \
	jmp	%cs:name

kdi_pass_dbgtrap:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
kdi_pass_brktrap:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
kdi_pass_invaltrap:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

	/*
	 * A minimal version of mdboot(), to be used by the master CPU only.
	 */
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
#if defined(__xpv)
	movl	$SHUTDOWN_reboot, %edi
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret
	SET_SIZE(kdi_cpu_debug_init)

#define	GETDREG(name, r)	\
	ENTRY_NP(name);		\
	movq	r, %rax;	\
	ret;			\
	SET_SIZE(name)

#define	SETDREG(name, r)	\
	ENTRY_NP(name);		\
	movq	%rdi, r;	\
	ret;			\
	SET_SIZE(name)

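/*
 * These expand to trivial SysV AMD64 accessor stubs; in C terms they behave
 * roughly like (a sketch of the convention, not the declared prototypes):
 *
 *	uintptr_t kdi_getdr0(void);		-- value returned in %rax
 *	void kdi_setdr0(uintptr_t value);	-- value passed in %rdi
 */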
	GETDREG(kdi_getdr0, %dr0)
	GETDREG(kdi_getdr1, %dr1)
	GETDREG(kdi_getdr2, %dr2)
	GETDREG(kdi_getdr3, %dr3)
	GETDREG(kdi_getdr6, %dr6)
	GETDREG(kdi_getdr7, %dr7)

	SETDREG(kdi_setdr0, %dr0)
	SETDREG(kdi_setdr1, %dr1)
	SETDREG(kdi_setdr2, %dr2)
	SETDREG(kdi_setdr3, %dr3)
	SETDREG(kdi_setdr6, %dr6)
	SETDREG(kdi_setdr7, %dr7)