xref: /titanic_50/usr/src/uts/intel/kdi/ia32/kdi_asm.s (revision dfb96a4f56fb431b915bc67e5d9d5c8d4f4f6679)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>
#include <sys/psw.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define	GET_CPUSAVE_ADDR \
	movl	%gs:CPU_ID, %ebx;		\
	movl	%ebx, %eax;			\
	movl	$KRS_SIZE, %ecx;		\
	mull	%ecx;				\
	movl	$kdi_cpusave, %edx;		\
	/*CSTYLED*/				\
	addl	(%edx), %eax
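
/*
 * In C terms, GET_CPUSAVE_ADDR computes
 *	%eax = kdi_cpusave + CPU->cpu_id * KRS_SIZE
 * i.e. the address of the current CPU's entry in the cpusave array
 * (kdi_cpusave itself holds a pointer to the base of the array).
 */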

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movl	%gs:CPU_IDT, %edx;		\
	cmpl	$kdi_idt, %edx;			\
	je	1f;				\
	movl	%edx, KRS_IDT(%eax);		\
	movl	%gs:CPU_GDT, %edx;		\
	movl	%edx, KRS_GDT(%eax);		\
1:

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movl	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpl	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addl	$1, tmp1;			\
	movl	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movl	KRS_CURCRUMB(cpusave), tmp1;	\
	addl	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movw	$0, KRS_CURCRUMBIDX(cpusave);	\
	leal	KRS_CRUMBS(cpusave), tmp1;	\
2:	movl	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movl	$KDI_NCRUMBS, tmp2;		\
3:	movl	$0, -4(tmp1, tmp2, 4);		\
	decl	tmp2;				\
	jnz	3b
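
/*
 * Note: the clear loop at 3: above zeroes KDI_NCRUMBS longwords starting at
 * the new crumb, which clears exactly one crumb only if
 * KRM_SIZE == KDI_NCRUMBS * 4 (i.e. a crumb has KDI_NCRUMBS 32-bit members).
 */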

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movl	KRS_CURCRUMB(cpusave), tmp;	\
	movl	value, offset(tmp)

#endif	/* _ASM */

/*
 * The main entry point for master CPUs.  It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */
#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clr	%ecx
	movl	(%ecx), %ecx
	SET_SIZE(kdi_nmiint)

	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	/* Save all registers and selectors */

	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

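	/*
	 * Reserve two more longwords below the pushed selectors for the
	 * KDIREG_SAVFP and KDIREG_SAVPC slots, and fill them with copies of
	 * the interrupted %ebp and the trapped %eip (a saved frame
	 * pointer/PC pair).
	 */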
	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)

	/*
	 * If the kernel has started using its own selectors, we should too.
	 * Update our saved selectors if they haven't been updated already.
	 */
	movw	%cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f			/* The kernel hasn't switched yet */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	je	1f			/* We already switched */

	/*
	 * The kernel switched, but we haven't.  Update our saved selectors
	 * to match the kernel's copies for use below.
	 */
	movl	$KCS_SEL, kdi_cs
	movl	$KDS_SEL, kdi_ds
	movl	$KFS_SEL, kdi_fs
	movl	$KGS_SEL, kdi_gs

1:
	/*
	 * Set the selectors to a known state.  If we come in from kmdb's IDT,
	 * we'll be on boot's %cs.  This will cause GET_CPUSAVE_ADDR to return
	 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
	 * ensue.  So, if we've got $KCS_SEL in kdi_cs, switch to it.  The
	 * other selectors are restored normally.
	 */
	movw	%cs:kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f
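	/*
	 * %cs can't be loaded with a mov; the far jump below reloads it with
	 * KCS_SEL and continues at the 1: label.
	 */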
	ljmp	$KCS_SEL, $1f
1:
	movw	%cs:kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	/*
	 * This has to come after we set %gs to the kernel descriptor.  Since
	 * we've hijacked some IDT entries used in user-space such as the
	 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
	 * in %gs.  On the hypervisor, CLI() needs GDT_GS to access the machcpu.
	 */
	CLI(%eax)

	GET_CPUSAVE_ADDR		/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
	ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)

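	/*
	 * Keep a pointer to the saved register area in %ebp (it is the base
	 * for the REG_OFF() accesses below), and push cpusave; it is popped
	 * again at the top of kdi_save_common_state.
	 */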
	movl	%esp, %ebp
	pushl	%eax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %esp in one
	 * of the debugger's memory ranges)?
	 */
	leal	kdi_memranges, %ecx
	movl	kdi_nmemranges, %edx
1:	cmpl	MR_BASE(%ecx), %esp
	jl	2f		/* below this range -- try the next one */
	cmpl	MR_LIM(%ecx), %esp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %esp not within debugger memory */
	addl	$MR_SIZE, %ecx
	jmp	1b

3:	/*
	 * %esp was within one of the debugger's memory ranges.  This should
	 * only happen when we take a trap while running in the debugger.
	 * kmdb_dpi_handle_fault will determine whether or not it was an
	 * expected trap, and will take the appropriate action.
	 */

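	/*
	 * Build the argument list for kdi_dvec_handle_fault: the trap number,
	 * the trapped %eip, the interrupted %esp derived from the saved
	 * KDIREG_ESP, and the CPU id are pushed in reverse order and popped
	 * off again (addl $16) after the call.
	 */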
	pushl	%ebx			/* cpuid */

	movl	REG_OFF(KDIREG_ESP)(%ebp), %ecx
	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx
	pushl	%ecx

	pushl	REG_OFF(KDIREG_EIP)(%ebp)
	pushl	REG_OFF(KDIREG_TRAPNO)(%ebp)

	call	kdi_dvec_handle_fault
	addl	$16, %esp

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  There's an %eip at %esp, and that's about it.  We want to
	 * make it look like the master CPU's stack.  By doing this, we can
	 * use the same resume code for both master and slave.  We need to
	 * make our stack look like a `struct regs' before we jump into the
	 * common save routine.
	 */

	pushl	%cs
	pushfl
	pushl	$-1		/* A phony trap error code */
	pushl	$-1		/* A phony trap number */
	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)

	/*
	 * Swap our saved EFLAGS and %eip.  Each is where the other should
	 * be: the cross call's `call' pushed %eip before we pushed %cs and
	 * EFLAGS, so %eip ended up in the slot where struct regs expects
	 * EFLAGS, and vice versa.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	xchgl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_EFLAGS)(%esp)

	/*
	 * Our stack now matches struct regs, and is irettable.  We don't need
	 * to do anything special for the hypervisor w.r.t. PS_IE since we
	 * iret twice anyway; the second iret back to the hypervisor
	 * will re-enable interrupts.
	 */
	CLI(%eax)

	/* Load sanitized segment selectors */
	movw	kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	GET_CPUSAVE_ADDR	/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)

	pushl	%eax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	/*
	 * The state of the world:
	 *
	 * The stack has a complete set of saved registers and segment
	 * selectors, arranged in `struct regs' order (or vice-versa), up to
	 * and including EFLAGS.  It also has a pointer to our cpusave area.
	 *
	 * We need to save a pointer to these saved registers.  We also want
	 * to adjust the saved %esp - it should point just beyond the saved
	 * registers to the last frame of the thread we interrupted.  Finally,
	 * we want to clear out bits 16-31 of the saved selectors, as the
	 * selector pushls don't automatically clear them.
	 */
	popl	%eax			/* the cpusave area */

	movl	%esp, KRS_GREGS(%eax)	/* save ptr to current saved regs */

	SAVE_IDTGDT

	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), REG_OFF(KDIREG_ESP)(%esp)

	andl	$0xffff, REG_OFF(KDIREG_SS)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_GS)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_FS)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_ES)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_DS)(%esp)

	/* Save off %cr0, and clear write protect */
	movl	%cr0, %ecx
	movl	%ecx, KRS_CR0(%eax)
	andl	$_BITNOT(CR0_WP), %ecx
	movl	%ecx, %cr0
	pushl	%edi
	movl	%eax, %edi
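	/*
	 * cpusave is now stashed in callee-saved %edi; the kdi_dreg_get
	 * calls below return their values in %eax, which would otherwise
	 * clobber it.
	 */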

	/* Save the debug registers and disable any active watchpoints */
	pushl	$7
	call	kdi_dreg_get
	addl	$4, %esp

	movl	%eax, KRS_DRCTL(%edi)
	andl	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax

	pushl	%eax
	pushl	$7
	call	kdi_dreg_set
	addl	$8, %esp

	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DRSTAT(%edi)

	pushl	$0
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(0)(%edi)

	pushl	$1
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(1)(%edi)

	pushl	$2
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(2)(%edi)

	pushl	$3
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(3)(%edi)

	movl	%edi, %eax
	popl	%edi

	/*
	 * Save any requested MSRs.
	 */
	movl	KRS_MSR(%eax), %ecx
	cmpl	$0, %ecx
	je	no_msr

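	/*
	 * KRS_MSR points at an array of MSR descriptors terminated by an
	 * entry whose MSR_NUM is zero; every entry of type KDI_MSR_READ has
	 * its current value read into MSR_VAL below.
	 */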
	pushl	%eax		/* rdmsr clobbers %eax */
	movl	%ecx, %ebx
1:
	movl	MSR_NUM(%ebx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%ebx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%ebx)
	movl	%edx, _CONST(MSR_VAL + 4)(%ebx)

msr_next:
	addl	$MSR_SIZE, %ebx
	jmp	1b

msr_done:
	popl	%eax

no_msr:
	clr	%ebp		/* stack traces should end here */

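	/*
	 * Enter the debugger proper.  The cpusave pointer is passed on the
	 * stack as the sole argument to kdi_debugger_entry() and is left
	 * there afterwards; the value returned in %eax is pushed on top of
	 * it, so kdi_resume can pop the resume command and then the cpusave
	 * area.
	 */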
	pushl	%eax
	call	kdi_debugger_entry
	pushl	%eax		/* leave cpusave on the stack */

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	leal	kdi_drreg, %ebx;				\
								\
	pushl	DR_CTL(%ebx);					\
	pushl	$7;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	$KDIREG_DRSTAT_RESERVED;			\
	pushl	$6;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(0)(%ebx);				\
	pushl	$0;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(1)(%ebx);				\
	pushl	$1;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(2)(%ebx);				\
	pushl	$2;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(3)(%ebx);				\
	pushl	$3;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movl	KRS_MSR(%edi), %ebx;				\
	cmpl	$0, %ebx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%ebx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%ebx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movl	MSR_VALP(%ebx), %edx;				\
	movl	0(%edx), %eax;					\
	movl	4(%edx), %edx;					\
	wrmsr;							\
2:								\
	addl	$MSR_SIZE, %ebx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	movl	kdi_msr_wrexit_msr, %ecx;			\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	movl	kdi_msr_wrexit_valp, %edx;			\
	movl	0(%edx), %eax;					\
	movl	4(%edx), %edx;					\
								\
	wrmsr;							\
1:

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_cpu_debug_init)
	pushl	%ebp
	movl	%esp, %ebp

	pushl	%edi
	pushl	%ebx

	movl	8(%ebp), %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%ebx
	popl	%edi
	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_resume)
	popl	%ebx		/* command */
	popl	%eax		/* cpusave */

	cmpl	$KDI_RESUME_PASS_TO_KERNEL, %ebx
	je	kdi_pass_to_kernel

	/*
	 * Send this CPU back into the world
	 */

	movl	KRS_CR0(%eax), %edx
	movl	%edx, %cr0

	pushl	%edi
	movl	%eax, %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%edi

	addl	$8, %esp	/* Discard savfp and savpc */

	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
	popal

	addl	$8, %esp	/* Discard TRAPNO and ERROR */

	IRET

	SET_SIZE(kdi_resume)
#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	/* cpusave is still in %eax */
	movl	KRS_CR0(%eax), %edx
	movl	%edx, %cr0

	/*
	 * When we replaced the kernel's handlers in the IDT, we made note of
	 * the handlers being replaced, thus allowing us to pass traps directly
	 * to said handlers here.  We won't have any registers available for
	 * use after we start popping, and we know we're single-threaded here,
	 * so we have to use a global to store the handler address.
	 */
	pushl	REG_OFF(KDIREG_TRAPNO)(%esp)
	call	kdi_kernel_trap2hdlr
	addl	$4, %esp
	movl	%eax, kdi_kernel_handler

	/*
	 * The trap handler will expect the stack to be in trap order, with
	 * %eip being the last entry.  Our stack is currently in KDIREG_*
	 * order, so we'll need to pop (and restore) our way back down.
	 */
	addl	$8, %esp	/* Discard savfp and savpc */
	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
	popal
	addl	$8, %esp	/* Discard trapno and err */

	ljmp	$KCS_SEL, $1f
1:	jmp	*%cs:kdi_kernel_handler
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

	/*
	 * Reboot the system.  This routine is to be called only by the master
	 * CPU.
	 */
	ENTRY_NP(kdi_reboot)

	pushl	$AD_BOOT
	pushl	$A_SHUTDOWN
	call	*psm_shutdownf
	addl	$8, %esp

	/*
	 * psm_shutdown didn't work or wasn't set; fall back to pc_reset.
	 */
	call	pc_reset
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */
693