/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	/*CSTYLED*/				\
	addq	(%rdx), %rax
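
/*
 * In rough C terms, the macro above computes (illustrative only)
 *	%rax = (uintptr_t)kdi_cpusave + CPU->cpu_id * KRS_SIZE
 * i.e. the address of the current CPU's kdi_cpusave_t entry, leaving the
 * CPU ID itself in %rbx.
 */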

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movq	%gs:CPU_IDT, %r11;		\
	leaq    kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:

/* %ss, %rsp, %rflags, %cs, %rip, %err, %trapno already on stack */

#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base)
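
/*
 * Note that rdmsr returns the 64-bit MSR value split across %edx:%eax, so
 * the shlq/orq sequence above reassembles the full GSBASE value before it
 * is stored in the gregs area.
 */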

#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	movq	REG_OFF(KDIREG_GSBASE)(%rdi), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr;						\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi
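
/*
 * %rdi is restored last because it holds the pointer to the saved-register
 * area for the duration of the macro; GSBASE and the data segments are
 * restored first, while %rax, %rcx and %rdx are still free for scratch use.
 */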

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addq	$1, tmp1;			\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movq	KRS_CURCRUMB(cpusave), tmp1;	\
	addq	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movq	$0, KRS_CURCRUMBIDX(cpusave);	\
	leaq	KRS_CRUMBS(cpusave), tmp1;	\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movq	$KDI_NCRUMBS, tmp2;		\
3:	movq	$0, -4(tmp1, tmp2, 4);		\
	decq	tmp2;				\
	jnz	3b

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)

#endif	/* _ASM */

/*
 * The main entry point for master CPUs.  It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */
#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)

	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the correct
	 * value for us.  Note that the previous GSBASE is saved in the
	 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
	 * blown away.
	 */
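	/*
	 * In long mode, sgdt stores a 10-byte pseudo-descriptor: a two-byte
	 * limit followed by the eight-byte linear base address, which is why
	 * the base is read from offset 2 below.
	 */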
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b

3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack currently
	 * looks like one you'd get from a zero-argument function call.  That
	 * is, there's the return %rip at %rsp, and that's about it.  We need
	 * to make it look like an interrupt stack.  When we first save, we'll
	 * reverse the saved %ss and %rip, which we'll fix back up when we've
	 * freed up some general-purpose registers.  We'll also need to fix up
	 * the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

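	/*
	 * The cross call's return %rip was pushed by the call instruction
	 * itself, so it currently sits in the frame's %ss slot, while the
	 * %ss value pushed above sits in the %rip slot.  Swap them into
	 * their proper positions.
	 */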
	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)

	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	/*
	 * The state of the world:
	 *
	 * The stack has a complete set of saved registers and segment
	 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
	 * to our cpusave area.
	 *
	 * We need to save, into the cpusave area, a pointer to these saved
	 * registers.  After that, we save a few more registers, ready the
	 * machine for debugger entry, and enter the debugger.
	 */

	popq	%rax			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rax)	/* save ptr to current saved regs */

	SAVE_IDTGDT

	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0

	/* Save the debug registers and disable any active watchpoints */

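	/*
	 * The kdi_dreg_get/kdi_dreg_set calls below follow the normal amd64
	 * calling convention: debug register number in %edi, value in %rsi,
	 * result in %rax.  The cpusave pointer is parked in %r15, which is
	 * call-preserved, so it survives the calls.
	 */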
	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax	/* restore cpu save area to rax */

	/*
	 * Save any requested MSRs.
	 */
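	/*
	 * KRS_MSR points to an array of MSR descriptors terminated by an
	 * entry whose MSR_NUM is zero; only entries of type KDI_MSR_READ
	 * have their current values captured into MSR_VAL here.
	 */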
	movq	KRS_MSR(%rax), %rcx
	cmpq	$0, %rcx
	je	no_msr

	pushq	%rax		/* rdmsr clobbers %eax */
	movq	%rcx, %rbx

1:
	movl	MSR_NUM(%rbx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%rbx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%rbx)
	movl	%edx, _CONST(MSR_VAL + 4)(%rbx)

msr_next:
	addq	$MSR_SIZE, %rbx
	jmp	1b

msr_done:
	popq	%rax

no_msr:
	clrq	%rbp		/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi	/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave and debugger return code for "call" to resume */
	popq	%rdi
	movq	%rax, %rsi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Given the address of the current CPU's cpusave area in %rax, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %rdi as a parameter, clobbers %rax-%rdx
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg, %r15;				\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi;						\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movq	KRS_MSR(%rdi), %rbx;				\
	cmpq	$0, %rbx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%rbx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%rbx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movq	MSR_VALP(%rbx), %rdx;				\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
	wrmsr;							\
2:								\
	addq	$MSR_SIZE, %rbx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	movl	kdi_msr_wrexit_msr, %ecx;			\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	movq	kdi_msr_wrexit_valp, %rdx;			\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
								\
	wrmsr;							\
1:

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_resume)

	/* cpusave in %rdi, debugger command in %rsi */

	cmpq	$KDI_RESUME_PASS_TO_KERNEL, %rsi
	je	kdi_pass_to_kernel

	/*
	 * Send this CPU back into the world
	 */
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
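	/*
	 * %rsp now points at the saved %rip, leaving an interrupt-return
	 * frame (%rip, %cs, %rflags, %rsp, %ss) on top of the stack.
	 */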
	IRET
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)

#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	/* cpusave is still in %rdi */
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0

	/*
	 * When we replaced the kernel's handlers in the IDT, we made note of
	 * the handlers being replaced, thus allowing us to pass traps directly
	 * to said handlers here.  We won't have any registers available for use
	 * after we start popping, and we know we're single-threaded here, so
	 * we have to use a global to store the handler address.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	call	kdi_kernel_trap2hdlr
	movq	%rax, kdi_kernel_handler

	/*
	 * The trap handler will expect the stack to be in trap order, with
	 * %rip being the last entry.  Our stack is currently in kdi_regs.h
	 * order, so we'll need to pop (and restore) our way back down.
	 */
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */

	jmp	*%cs:kdi_kernel_handler
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

	/*
	 * Reboot the system.  This routine is to be called only by the master
	 * CPU.
	 */
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf

	/*
	 * psm_shutdown didn't work or it wasn't set, try pc_reset.
	 */
	call	pc_reset
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */