/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>
#include <sys/psw.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define	GET_CPUSAVE_ADDR \
	movl	%gs:CPU_ID, %ebx;		\
	movl	%ebx, %eax;			\
	movl	$KRS_SIZE, %ecx;		\
	mull	%ecx;				\
	movl	$kdi_cpusave, %edx;		\
	/*CSTYLED*/				\
	addl	(%edx), %eax
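
/*
 * In rough C terms, the macro above computes (a sketch only; kdi_cpusave
 * holds the base address of the per-CPU save areas):
 *
 *	addr = kdi_cpusave + cpu_id * KRS_SIZE;
 */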

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movl	%gs:CPU_IDT, %edx;		\
	cmpl	$kdi_idt, %edx;			\
	je	1f;				\
	movl	%edx, KRS_IDT(%eax);		\
	movl	%gs:CPU_GDT, %edx;		\
	movl	%edx, KRS_GDT(%eax);		\
1:
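
/*
 * A C sketch of the macro above (field names are illustrative, chosen to
 * match the CPU_IDT/CPU_GDT and KRS_IDT/KRS_GDT assym offsets):
 *
 *	if (CPU->cpu_idt != kdi_idt) {
 *		save->krs_idt = CPU->cpu_idt;
 *		save->krs_gdt = CPU->cpu_gdt;
 *	}
 */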

/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	leal	kdi_drreg, %ebx;				\
								\
	pushl	DR_CTL(%ebx);					\
	pushl	$7;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	$KDIREG_DRSTAT_RESERVED;			\
	pushl	$6;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(0)(%ebx);				\
	pushl	$0;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(1)(%ebx);				\
	pushl	$1;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(2)(%ebx);				\
	pushl	$2;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(3)(%ebx);				\
	pushl	$3;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movl	KRS_MSR(%edi), %ebx;				\
	cmpl	$0, %ebx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%ebx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%ebx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movl	MSR_VALP(%ebx), %edx;				\
	movl	0(%edx), %eax;					\
	movl	4(%edx), %edx;					\
	wrmsr;							\
2:								\
	addl	$MSR_SIZE, %ebx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	movl	kdi_msr_wrexit_msr, %ecx;			\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	movl	kdi_msr_wrexit_valp, %edx;			\
	movl	0(%edx), %eax;					\
	movl	4(%edx), %edx;					\
								\
	wrmsr;							\
1:
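
/*
 * The kdi_dreg_set calls above follow the cdecl convention: assuming a
 * prototype along the lines of kdi_dreg_set(int reg, ulong_t value),
 * each site pushes the value, then the debug register number, and pops
 * both arguments after the call returns.
 */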

#define	KDI_RESTORE_REGS() \
	/* Discard savfp and savpc */ \
	addl	$8, %esp; \
	popl	%ss; \
	popl	%gs; \
	popl	%fs; \
	popl	%es; \
	popl	%ds; \
	popal; \
	/* Discard trapno and err */ \
	addl	$8, %esp

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */
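
/*
 * A sketch of the layout the macros below assume (C field names are
 * illustrative, derived from the assym offsets): the crumbs form an
 * array of KDI_NCRUMBS entries of KRM_SIZE bytes apiece, starting at
 * KRS_CRUMBS in the cpusave area.  KRS_CURCRUMBIDX is the index of the
 * current crumb, and KRS_CURCRUMB caches its address:
 *
 *	curcrumb = &save->krs_crumbs[save->krs_curcrumbidx];
 */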

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movl	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpl	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addl	$1, tmp1;			\
	movl	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movl	KRS_CURCRUMB(cpusave), tmp1;	\
	addl	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movw	$0, KRS_CURCRUMBIDX(cpusave);	\
	leal	KRS_CRUMBS(cpusave), tmp1;	\
2:	movl	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movl	$KDI_NCRUMBS, tmp2;		\
3:	movl	$0, -4(tmp1, tmp2, 4);		\
	decl	tmp2;				\
	jnz	3b

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movl	KRS_CURCRUMB(cpusave), tmp;	\
	movl	value, offset(tmp)

#endif	/* _ASM */

/*
 * The main entry point for master CPUs.  It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */
#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clr	%ecx
	movl	(%ecx), %ecx
	SET_SIZE(kdi_nmiint)

	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	/* Save all registers and selectors */

	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)
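
	/*
	 * The stack now holds a full KDIREG frame.  From low to high
	 * addresses (assuming the KDIREG_* indices mirror the push order
	 * above): savfp, savpc, %ss, %gs, %fs, %es, %ds, the eight pushal
	 * registers (%edi up through %eax), and finally trapno, err,
	 * %eip, %cs, and EFLAGS from the IDT stub and the trap itself.
	 */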

	/*
	 * If the kernel has started using its own selectors, we should too.
	 * Update our saved selectors if they haven't been updated already.
	 */
	movw	%cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f			/* The kernel hasn't switched yet */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	je	1f			/* We already switched */

	/*
	 * The kernel switched, but we haven't.  Update our saved selectors
	 * to match the kernel's copies for use below.
	 */
	movl	$KCS_SEL, kdi_cs
	movl	$KDS_SEL, kdi_ds
	movl	$KFS_SEL, kdi_fs
	movl	$KGS_SEL, kdi_gs

1:
	/*
	 * Set the selectors to a known state.  If we come in from kmdb's IDT,
	 * we'll be on boot's %cs.  This will cause GET_CPUSAVE_ADDR to return
	 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
	 * ensue.  So, if we've got $KCS_SEL in kdi_cs, switch to it.  The
	 * other selectors are restored normally.
	 */
	movw	%cs:kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f
	ljmp	$KCS_SEL, $1f
1:
	movw	%cs:kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	/*
	 * This has to come after we set %gs to the kernel descriptor.  Since
	 * we've hijacked some IDT entries used in user-space such as the
	 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
	 * in %gs.  On the hypervisor, CLI() needs GDT_GS to access the machcpu.
	 */
	CLI(%eax)

#if defined(__xpv)
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+2(%esp)

#endif

	GET_CPUSAVE_ADDR		/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
	ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)

	movl	%esp, %ebp
	pushl	%eax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %esp in one
	 * of the debugger's memory ranges)?
	 */
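	/*
	 * In rough C terms, the scan below is (a sketch; MR_BASE and
	 * MR_LIM are the assym offsets of each range's bounds):
	 *
	 *	for (i = kdi_nmemranges, mr = kdi_memranges; i > 0;
	 *	    i--, mr++) {
	 *		if (esp >= mr->base && esp <= mr->lim)
	 *			goto in_debugger;
	 *	}
	 */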
	leal	kdi_memranges, %ecx
	movl	kdi_nmemranges, %edx
1:	cmpl	MR_BASE(%ecx), %esp
	jl	2f		/* below this range -- try the next one */
	cmpl	MR_LIM(%ecx), %esp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %esp not within debugger memory */
	addl	$MR_SIZE, %ecx
	jmp	1b

3:	/*
	 * %esp was within one of the debugger's memory ranges.  This should
	 * only happen when we take a trap while running in the debugger.
	 * kmdb_dpi_handle_fault will determine whether or not it was an
	 * expected trap, and will take the appropriate action.
	 */

	pushl	%ebx			/* cpuid */

	movl	REG_OFF(KDIREG_ESP)(%ebp), %ecx
	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx
	pushl	%ecx

	pushl	REG_OFF(KDIREG_EIP)(%ebp)
	pushl	REG_OFF(KDIREG_TRAPNO)(%ebp)

	call	kdi_dvec_handle_fault
	addl	$16, %esp

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  There's an %eip at %esp, and that's about it.  We want to
	 * make it look like the master CPU's stack.  By doing this, we can
	 * use the same resume code for both master and slave.  We need to
	 * make our stack look like a `struct regs' before we jump into the
	 * common save routine.
	 */

	pushl	%cs
	pushfl
	pushl	$-1		/* A phony trap error code */
	pushl	$-1		/* A phony trap number */
	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)

	/*
	 * Swap our saved EFLAGS and %eip.  Each is where the other
	 * should be.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	xchgl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_EFLAGS)(%esp)
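
	/*
	 * The swap is needed because the cross call left the return %eip
	 * on the stack first, after which we pushed %cs and EFLAGS above
	 * it; a hardware trap would have pushed the three in the opposite
	 * order (EFLAGS, then %cs, then %eip).
	 */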

	/*
	 * Our stack now matches struct regs, and is irettable.  We don't need
	 * to do anything special for the hypervisor w.r.t. PS_IE since we
	 * iret twice anyway; the second iret back to the hypervisor
	 * will re-enable interrupts.
	 */
	CLI(%eax)

	/* Load sanitized segment selectors */
	movw	kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	GET_CPUSAVE_ADDR	/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)

	pushl	%eax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif	/* __lint */

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in `struct regs' order (or vice-versa), up to
 * and including EFLAGS.  It also has a pointer to our cpusave area.
 *
 * We need to save a pointer to these saved registers.  We also want
 * to adjust the saved %esp - it should point just beyond the saved
 * registers to the last frame of the thread we interrupted.  Finally,
 * we want to clear out bits 16-31 of the saved selectors, as the
 * selector pushls don't automatically clear them.
 */
#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	popl	%eax			/* the cpusave area */

	movl	%esp, KRS_GREGS(%eax)	/* save ptr to current saved regs */

	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), KDIREG_OFF(KDIREG_ESP)(%esp)
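
	/*
	 * Assuming the KDIREG_* indices mirror the frame layout, the
	 * constant above is the size of the five slots (trapno, err,
	 * %eip, %cs, EFLAGS) that lie between the pushal-recorded %esp
	 * and the interrupted thread's stack, so the saved %esp now
	 * points just past the trap frame.
	 */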

	andl	$0xffff, KDIREG_OFF(KDIREG_SS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_GS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_FS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_ES)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_DS)(%esp)

	pushl	%eax
	call	kdi_trap_pass
	cmpl	$1, %eax
	je	kdi_pass_to_kernel
	popl	%eax
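	/*
	 * kdi_trap_pass appears to return 1 when the trap should be
	 * handed to the kernel rather than handled here; in that case the
	 * cpusave pointer is deliberately left on the stack for
	 * kdi_pass_to_kernel to pop.
	 */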

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movl	%cr0, %ecx
	movl	%ecx, KRS_CR0(%eax)
	andl	$_BITNOT(CR0_WP), %ecx
	movl	%ecx, %cr0
#endif
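	/*
	 * With CR0.WP clear, supervisor-mode writes bypass page-level
	 * write protection, which lets the debugger patch otherwise
	 * read-only kernel text (e.g. to plant breakpoints).
	 */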
	pushl	%edi
	movl	%eax, %edi

	/* Save the debug registers and disable any active watchpoints */
	pushl	$7
	call	kdi_dreg_get
	addl	$4, %esp

	movl	%eax, KRS_DRCTL(%edi)
	andl	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax

	pushl	%eax
	pushl	$7
	call	kdi_dreg_set
	addl	$8, %esp

	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DRSTAT(%edi)

	pushl	$0
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(0)(%edi)

	pushl	$1
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(1)(%edi)

	pushl	$2
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(2)(%edi)

	pushl	$3
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(3)(%edi)

	movl	%edi, %eax
	popl	%edi

	/*
	 * Save any requested MSRs.
	 */
	movl	KRS_MSR(%eax), %ecx
	cmpl	$0, %ecx
	je	no_msr

	pushl	%eax		/* rdmsr clobbers %eax */
	movl	%ecx, %ebx
1:
	movl	MSR_NUM(%ebx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%ebx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%ebx)
	movl	%edx, _CONST(MSR_VAL + 4)(%ebx)

msr_next:
	addl	$MSR_SIZE, %ebx
	jmp	1b

msr_done:
	popl	%eax

no_msr:
	clr	%ebp		/* stack traces should end here */

	pushl	%eax
	call	kdi_debugger_entry
	popl	%eax

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	/* cpusave in %eax */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */

#if !defined(__xpv)
	movl	KRS_CR0(%eax), %edx
	movl	%edx, %cr0
#endif

	pushl	%edi
	movl	%eax, %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%edi

#if defined(__xpv)
	/*
	 * kmdb might have set PS_T in the saved eflags, so we can't use
	 * intr_restore, since that restores all of eflags; instead, just
	 * pick up PS_IE from the saved eflags.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	testl	$PS_IE, %eax
	jz	2f
	STI
2:
#endif

	addl	$8, %esp	/* Discard savfp and savpc */

	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
	popal

	addl	$8, %esp	/* Discard TRAPNO and ERROR */

	IRET

	SET_SIZE(kdi_resume)
#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	/* pop cpusave, leaving %esp pointing to saved regs */
	popl	%eax

	movl	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%eax)

	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %eip being
	 * the last entry, so we'll need to restore all our regs.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we restore, we don't have registers to work
	 * with; we can't use a global since other CPUs can easily pass through
	 * here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %eax
	cmpl	$T_SGLSTP, %eax
	je	kpass_dbgtrap
	cmpl	$T_BPTFLT, %eax
	je	kpass_brktrap
	cmpl	$T_DBGENTR, %eax
	je	kpass_invaltrap
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR

kpass_dbgtrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $1f
1:	jmp	%cs:dbgtrap
	/*NOTREACHED*/

kpass_brktrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $2f
2:	jmp	%cs:brktrap
	/*NOTREACHED*/

kpass_invaltrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $3f
3:	jmp	%cs:invaltrap
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

	/*
	 * A minimal version of mdboot(), to be used by the master CPU only.
	 */
	ENTRY_NP(kdi_reboot)

	pushl	$AD_BOOT
	pushl	$A_SHUTDOWN
	call	*psm_shutdownf
	addl	$8, %esp

#if defined(__xpv)
	pushl	$SHUTDOWN_reboot
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

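	/*
	 * Given a pointer to a CPU's cpusave area as its only argument,
	 * (re)apply the saved debugging state -- debug registers and any
	 * requested MSR writes -- to this CPU using
	 * KDI_RESTORE_DEBUGGING_STATE.  %edi and %ebx are saved and
	 * restored around the macro, since the calling convention
	 * requires the callee to preserve them.
	 */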
	ENTRY_NP(kdi_cpu_debug_init)
	pushl	%ebp
	movl	%esp, %ebp

	pushl	%edi
	pushl	%ebx

	movl	8(%ebp), %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%ebx
	popl	%edi
	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */
748ae115bc7Smrj
749