/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	/*CSTYLED*/				\
	addq	(%rdx), %rax
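
/*
 * In C terms the macro above computes, roughly,
 *	%rax = (uintptr_t)kdi_cpusave + CPU_ID * KRS_SIZE
 * i.e. the address of this CPU's entry in the cpusave array.  kdi_cpusave
 * itself is a pointer variable, hence the indirect addq through %rdx; the
 * mulq is what clobbers %rdx.
 */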

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movq	%gs:CPU_IDT, %r11;		\
	leaq    kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:

#ifdef __xpv

#define	SAVE_GSBASE(reg) /* nothing */
#define	RESTORE_GSBASE(reg) /* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base)

#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif /* __xpv */
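
/*
 * rdmsr and wrmsr move the 64-bit MSR value split across %edx:%eax
 * (high:low), which is why SAVE_GSBASE and RESTORE_GSBASE shift and
 * recombine the two halves around the MSR access.
 */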

/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.  Note
 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
 * unnecessary.
 */
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)

#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi
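
/*
 * Note that KDI_RESTORE_REGS copies its base argument into %rdi and restores
 * %rdi last: %rdi is the base pointer for every other load, so it can't be
 * reloaded until all of the other registers are back in place.
 */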

/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %rdi as a parameter; clobbers %rax-%rdx, %rsi,
 * %rbx, and %r15.
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg(%rip), %r15;				\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi;						\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movq	KRS_MSR(%rdi), %rbx;				\
	cmpq	$0, %rbx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%rbx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%rbx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movq	MSR_VALP(%rbx), %rdx;				\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
	wrmsr;							\
2:								\
	addq	$MSR_SIZE, %rbx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	leaq	kdi_msr_wrexit_msr(%rip), %rcx;			\
	movl	(%rcx), %ecx;					\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	leaq	kdi_msr_wrexit_valp(%rip), %rdx;		\
	movq	(%rdx), %rdx;					\
	movl	0(%rdx), %eax;					\
	movl	4(%rdx), %edx;					\
								\
	wrmsr;							\
1:

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addq	$1, tmp1;			\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movq	KRS_CURCRUMB(cpusave), tmp1;	\
	addq	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movq	$0, KRS_CURCRUMBIDX(cpusave);	\
	leaq	KRS_CRUMBS(cpusave), tmp1;	\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movq	$KDI_NCRUMBS, tmp2;		\
3:	movq	$0, -4(tmp1, tmp2, 4);		\
	decq	tmp2;				\
	jnz	3b

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)
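
/*
 * ADD_CRUMB stores "value" (an immediate or a register, as seen at the call
 * sites below) into the field at "offset" -- one of the KRM_* offsets --
 * within the crumb that KRS_CURCRUMB currently points to.
 */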

#endif	/* _ASM */

#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
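	/*
	 * The two instructions below simply force a fault by loading from
	 * virtual address zero; a real NMI handler has yet to be written
	 * (hence the XXX above).
	 */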
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)

	/*
	 * The main entry point for master CPUs.  It also serves as the trap
	 * handler for all traps and interrupts taken during single-step.
	 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

#ifdef __xpv
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif

#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the correct
	 * value for us.  Note that the previous GSBASE is saved in the
	 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
	 * blown away.  On the hypervisor, we don't need to do this, since it's
	 * ensured we're on our requested kernel GSBASE already.
	 */
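	/*
	 * sgdt stores a 10-byte pseudo-descriptor (a 2-byte limit followed by
	 * the 8-byte base address), which is why we carve 10 bytes off the
	 * stack and read the base from offset 2.
	 */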
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr
#endif	/* __xpv */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
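	/*
	 * The loop below is, roughly, the following C, where label 3 plays
	 * the part of in_debugger and mr->base/mr->lim stand for whatever
	 * fields MR_BASE and MR_LIM resolve to:
	 *
	 *	for (i = kdi_nmemranges, mr = kdi_memranges; i > 0; i--, mr++) {
	 *		if (rsp >= mr->base && rsp <= mr->lim)
	 *			goto in_debugger;
	 *	}
	 *	goto kdi_save_common_state;
	 */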
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b

3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx		/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack currently
	 * looks like one you'd get from a zero-argument function call.  That
	 * is, there's the return %rip at %rsp, and that's about it.  We need
	 * to make it look like an interrupt stack.  When we first save, we'll
	 * reverse the saved %ss and %rip, which we'll fix back up when we've
	 * freed up some general-purpose registers.  We'll also need to fix up
	 * the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)
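
	/*
	 * The xchgq sequence above swaps the saved %ss and %rip slots (they
	 * were pushed in reverse order at entry), and the addq of 8 backs the
	 * saved %rsp up over the return address pushed by the cross call, so
	 * the saved frame describes the interrupted context.
	 */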

	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif	/* __lint */

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */

#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	cmpq	$1, %rax
	je	kdi_pass_to_kernel
	popq	%rax /* cpusave in %rax */

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif
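
	/*
	 * Clearing CR0.WP allows supervisor-mode writes to read-only pages,
	 * presumably so the debugger can patch otherwise write-protected
	 * text (e.g. to place breakpoints).  The saved %cr0 is restored from
	 * KRS_CR0 in kdi_resume.
	 */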

	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15		/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)

	movq	%r15, %rax	/* restore cpu save area to rax */

	/*
	 * Save any requested MSRs.
	 */
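	/*
	 * KRS_MSR points to an array of MSR descriptors, each MSR_SIZE bytes,
	 * terminated by an entry whose MSR_NUM is zero.  Only KDI_MSR_READ
	 * entries are captured here; KDI_MSR_WRITE entries are handled on the
	 * way back out, in KDI_RESTORE_DEBUGGING_STATE.
	 */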
	movq	KRS_MSR(%rax), %rcx
	cmpq	$0, %rcx
	je	no_msr

	pushq	%rax		/* rdmsr clobbers %eax */
	movq	%rcx, %rbx

1:
	movl	MSR_NUM(%rbx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%rbx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%rbx)
	movl	%edx, _CONST(MSR_VAL + 4)(%rbx)

msr_next:
	addq	$MSR_SIZE, %rbx
	jmp	1b

msr_done:
	popq	%rax

no_msr:
	clrq	%rbp		/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi	/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	IRET
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)

#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi /* cpusave */

	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)

	/*
	 * Find the trap and vector off to the right kernel handler.  The
	 * trap handler will expect the stack to be in trap order, with %rip
	 * being the last entry, so we'll need to restore all our regs.  On
	 * i86xpv we'll need to compensate for XPV_TRAP_POP.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	cmpq	$T_SGLSTP, %rdi
	je	1f
	cmpq	$T_BPTFLT, %rdi
	je	2f
	cmpq	$T_DBGENTR, %rdi
	je	3f
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR

#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp); \
	/* Discard state, trapno, err */ \
	addq	$REG_OFF(KDIREG_RIP), %rsp; \
	XPV_TRAP_PUSH; \
	jmp	%cs:name

1:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
2:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
3:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

	/*
	 * A minimal version of mdboot(), to be used by the master CPU only.
	 */
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
#if defined(__xpv)
	movl	$SHUTDOWN_reboot, %edi
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */