xref: /titanic_41/usr/src/uts/intel/ia32/ml/exception.s (revision 40e5e17b3361b3eea56a9723071c406894a20b78)
1/*
2 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
3 * Use is subject to license terms.
4 */
5
6/*
7 * Copyright (c) 1989, 1990 William F. Jolitz.
8 * Copyright (c) 1990 The Regents of the University of California.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
40 */
41
42#pragma ident	"%Z%%M%	%I%	%E% SMI"
43
44#include <sys/asm_linkage.h>
45#include <sys/asm_misc.h>
46#include <sys/trap.h>
47#include <sys/psw.h>
48#include <sys/regset.h>
49#include <sys/privregs.h>
50#include <sys/dtrace.h>
51#include <sys/x86_archext.h>
52#include <sys/traptrace.h>
53#include <sys/machparam.h>
54
55/*
56 * only one routine in this file is interesting to lint
57 */
58
59#if defined(__lint)
60
/*
 * Lint-visible stub.  The real ndptrap_frstor is an assembly label
 * inside the #NM handler below; trap.c compares fault PCs against it
 * to detect FP restore faults taken in the kernel on behalf of a user
 * context.
 */
61void
62ndptrap_frstor(void)
63{}
64
65#else
66
67#include "assym.h"
68
69/*
70 * push $0 on stack for traps that do not
71 * generate an error code. This is so the rest
72 * of the kernel can expect a consistent stack
73 * from any exception.
74 */
75
/*
 * TRAP_NOERR: build a consistent trap frame for vectors where the CPU
 * does not push an error code -- push a dummy 0 error code first, then
 * the trap number, so every handler sees the same stack layout.
 */
76#define	TRAP_NOERR(trapno)	\
77	push	$0;		\
78	push	$trapno
79
/* NPTRAP_NOERR: alias of TRAP_NOERR for entry points shared with #BP spoofing */
80#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
81
82/*
83 * error code already pushed by hw
84 * onto stack, so only the trap number is added.
85 */
86#define	TRAP_ERR(trapno)	\
87	push	$trapno
88
89
90	/*
91	 * #DE -- integer divide fault (vector 0); no error code from hw
92	 */
93	ENTRY_NP(div0trap)
94	TRAP_NOERR(T_ZERODIV)	/* $0 */
95	jmp	cmntrap
96	SET_SIZE(div0trap)
97
98	/*
99	 * #DB -- debug exception (vector 1); no error code from hw
100	 *
101	 * Fetch %dr6 and clear it, handing off the value to the
102	 * cmntrap code in %r15/%esi
103	 */
104	ENTRY_NP(dbgtrap)
105	TRAP_NOERR(T_SGLSTP)	/* $1 */
106
107#if defined(__amd64)
108	/*
109	 * If we get here as a result of single-stepping a sysenter
110	 * instruction, we suddenly find ourselves taking a #db
111	 * in kernel mode -before- we've swapgs'ed.  So before we can
112	 * take the trap, we do the swapgs here, and fix the return
113	 * %rip in trap() so that we return immediately after the
114	 * swapgs in the sysenter handler to avoid doing the swapgs again.
115	 *
116	 * Nobody said that the design of sysenter was particularly
117	 * elegant, did they?
118	 */
119	pushq	%r11			/* scratch for sys_sysenter address */
120	leaq	sys_sysenter(%rip), %r11
121	cmpq	%r11, 8(%rsp)		/* interrupted %rip == sys_sysenter? */
122	jne	1f
123	SWAPGS
1241:	popq	%r11
125
126	INTR_PUSH
127	movq	%db6, %r15		/* hand %dr6 to cmntrap_pushed */
128	xorl	%eax, %eax
129	movq	%rax, %db6		/* clear %dr6 for the next #DB */
130
131#elif defined(__i386)
132
133	INTR_PUSH
134	movl	%db6, %esi		/* hand %dr6 to cmntrap_pushed */
135	xorl	%eax, %eax
136	movl	%eax, %db6		/* clear %dr6 for the next #DB */
137#endif	/* __i386 */
138
139	jmp	cmntrap_pushed
140	SET_SIZE(dbgtrap)
141
142#if defined(__amd64)
143
144/*
145 * Macro to set the gsbase or kgsbase to the address of the struct cpu
146 * for this processor.  If we came from userland, set kgsbase else
147 * set gsbase.  We find the proper cpu struct by looping through
148 * the cpu structs for all processors till we find a match for the gdt
149 * of the trapping processor.  The stack is expected to be pointing at
150 * the standard regs pushed by hardware on a trap (plus error code and trapno).
151 */
/*
 * Implementation notes: sgdt stores its 10-byte descriptor at 6(%rsp)
 * so the 8-byte base field lands aligned at 8(%rsp).  wrmsr takes the
 * new base split across %edx:%eax, with the MSR number in %ecx.
 */
152#define	SET_CPU_GSBASE							\
153	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
154	movq	%rax, REGOFF_RAX(%rsp);					\
155	movq	%rbx, REGOFF_RBX(%rsp);					\
156	movq	%rcx, REGOFF_RCX(%rsp);					\
157	movq	%rdx, REGOFF_RDX(%rsp);					\
158	movq	%rbp, REGOFF_RBP(%rsp);					\
159	movq	%rsp, %rbp;						\
160	subq	$16, %rsp;		/* space for gdt */		\
161	sgdt	6(%rsp);						\
162	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
163	xorl	%ebx, %ebx;		/* loop index */		\
164	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1651:									\
166	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
167	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
168	je	2f;			/* yes, continue */		\
169	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
170	je	3f;			/* yes, go set gsbase */	\
1712:									\
172	incl	%ebx;			/* i++ */			\
173	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
174	jb	1b;			/* yes, loop */			\
175/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
1763:									\
177	movl	$MSR_AMD_KGSBASE, %ecx;					\
178	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
179	jne	4f;			/* no, go set KGSBASE */	\
180	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
181        mfence;				/* OPTERON_ERRATUM_88 */	\
1824:									\
183	movq	%rax, %rdx;		/* write base register */	\
184	shrq	$32, %rdx;						\
185	wrmsr;								\
186	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
187	movq	REGOFF_RCX(%rbp), %rcx;					\
188	movq	REGOFF_RBX(%rbp), %rbx;					\
189	movq	REGOFF_RAX(%rbp), %rax;					\
190	movq	%rbp, %rsp;						\
191	movq	REGOFF_RBP(%rsp), %rbp;					\
192	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
193
194#endif	/* __amd64 */
195
196
197#if defined(__amd64)
198
199	/*
200	 * #NMI -- non-maskable interrupt (vector 2); no error code from hw
201	 */
202	ENTRY_NP(nmiint)
203	TRAP_NOERR(T_NMIFLT)	/* $2 */
204
205	SET_CPU_GSBASE		/* NMI may arrive before swapgs; fix %gs base by hand */
206
207	/*
208	 * Save all registers and setup segment registers
209	 * with kernel selectors.
210	 */
211	INTR_PUSH
212	INTGATE_INIT_KERNEL_FLAGS
213
214	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
215	TRACE_REGS(%r12, %rsp, %rax, %rbx)
216	TRACE_STAMP(%r12)
217
218	movq	%rsp, %rbp
219
220	movq	%rbp, %rdi	/* arg0 = &regs */
221	call	av_dispatch_nmivect
222
223	INTR_POP
224	IRET
225	/*NOTREACHED*/
226	SET_SIZE(nmiint)
227
228#elif defined(__i386)
229
230	/*
231	 * #NMI -- non-maskable interrupt (vector 2); no error code from hw
232	 */
233	ENTRY_NP(nmiint)
234	TRAP_NOERR(T_NMIFLT)	/* $2 */
235
236	/*
237	 * Save all registers and setup segment registers
238	 * with kernel selectors.
239	 */
240	INTR_PUSH
241	INTGATE_INIT_KERNEL_FLAGS
242
243	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
244	TRACE_REGS(%edi, %esp, %ebx, %ecx)
245	TRACE_STAMP(%edi)
246
247	movl	%esp, %ebp
248
249	pushl	%ebp		/* arg0 = &regs */
250	call	av_dispatch_nmivect
251	addl	$4, %esp
252
253	INTR_POP_USER
254	IRET
255	SET_SIZE(nmiint)
256
257#endif	/* __i386 */
258
259	/*
260	 * #BP -- breakpoint trap (vector 3); no error code from hw
261	 */
262	ENTRY_NP(brktrap)
263
264#if defined(__amd64)
265	cmpw	$KCS_SEL, 8(%rsp)	/* #BP from kernel mode? */
266	jne	bp_user
267
268	/*
269	 * This is a breakpoint in the kernel -- it is very likely that this
270	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
271	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
272	 * we must decrement the trapping %rip to make it appear as a fault.
273	 * We then push a non-zero error code to indicate that this is coming
274	 * from #BP.
275	 */
276	decq	(%rsp)
277	push	$1			/* error code -- non-zero for #BP */
278	jmp	ud_kernel
279
280bp_user:
281#endif /* __amd64 */
282
283	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
284	jmp	dtrace_trap
285
286	SET_SIZE(brktrap)
287
288	/*
289	 * #OF -- overflow trap (vector 4); no error code from hw
290	 */
291	ENTRY_NP(ovflotrap)
292	TRAP_NOERR(T_OVFLW)	/* $4 */
293	jmp	cmntrap
294	SET_SIZE(ovflotrap)
295
296	/*
297	 * #BR -- BOUND range exceeded (vector 5); no error code from hw
298	 */
299	ENTRY_NP(boundstrap)
300	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
301	jmp	cmntrap
302	SET_SIZE(boundstrap)
303
304#if defined(__amd64)
305
/*
 * #UD -- invalid opcode (vector 6), amd64 version.  Kernel-mode #UD is
 * assumed to be a DTrace FBT/pid provider probe; dtrace_invop() decides
 * which single instruction to emulate (push %rbp / leave / nop / ret).
 * Kernel #BP also funnels in at ud_kernel (see brktrap).
 */
306	ENTRY_NP(invoptrap)
307
308	cmpw	$KCS_SEL, 8(%rsp)
309	jne	ud_user
310
311	push	$0			/* error code -- zero for #UD */
312ud_kernel:
313	push	$0xdddd			/* a dummy trap number */
314	INTR_PUSH
315	movq	REGOFF_RIP(%rsp), %rdi	/* arg0: trapping %rip */
316	movq	REGOFF_RSP(%rsp), %rsi	/* interrupted %rsp */
317	movq	REGOFF_RAX(%rsp), %rdx	/* arg2: %rax at trap time */
318	pushq	(%rsi)			/* copy word at interrupted %rsp */
319	movq	%rsp, %rsi		/* arg1: pointer to that word */
320	call	dtrace_invop
321	ALTENTRY(dtrace_invop_callsite)
322	addq	$8, %rsp		/* discard copied stack word */
323	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
324	je	ud_push
325	cmpl	$DTRACE_INVOP_LEAVE, %eax
326	je	ud_leave
327	cmpl	$DTRACE_INVOP_NOP, %eax
328	je	ud_nop
329	cmpl	$DTRACE_INVOP_RET, %eax
330	je	ud_ret
331	jmp	ud_trap
332
333ud_push:
334	/*
335	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
336	 * down 8 bytes, and then store the base pointer.
337	 */
338	INTR_POP
339	subq	$16, %rsp		/* make room for %rbp */
340	pushq	%rax			/* push temp */
341	movq	24(%rsp), %rax		/* load calling RIP */
342	addq	$1, %rax		/* increment over trapping instr */
343	movq	%rax, 8(%rsp)		/* store calling RIP */
344	movq	32(%rsp), %rax		/* load calling CS */
345	movq	%rax, 16(%rsp)		/* store calling CS */
346	movq	40(%rsp), %rax		/* load calling RFLAGS */
347	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
348	movq	48(%rsp), %rax		/* load calling RSP */
349	subq	$8, %rax		/* make room for %rbp */
350	movq	%rax, 32(%rsp)		/* store calling RSP */
351	movq	56(%rsp), %rax		/* load calling SS */
352	movq	%rax, 40(%rsp)		/* store calling SS */
353	movq	32(%rsp), %rax		/* reload calling RSP */
354	movq	%rbp, (%rax)		/* store %rbp there */
355	popq	%rax			/* pop off temp */
356	IRET				/* return from interrupt */
357	/*NOTREACHED*/
358
359ud_leave:
360	/*
361	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
362	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
363	 * than it is on i386 -- we can exploit the fact that the %rsp is
364	 * explicitly saved to effect the pop without having to reshuffle
365	 * the other data pushed for the trap.
366	 */
367	INTR_POP
368	pushq	%rax			/* push temp */
369	movq	8(%rsp), %rax		/* load calling RIP */
370	addq	$1, %rax		/* increment over trapping instr */
371	movq	%rax, 8(%rsp)		/* store calling RIP */
372	movq	(%rbp), %rax		/* get new %rbp */
373	addq	$8, %rbp		/* adjust new %rsp */
374	movq	%rbp, 32(%rsp)		/* store new %rsp */
375	movq	%rax, %rbp		/* set new %rbp */
376	popq	%rax			/* pop off temp */
377	IRET				/* return from interrupt */
378	/*NOTREACHED*/
379
380ud_nop:
381	/*
382	 * We must emulate a "nop".  This is obviously not hard:  we need only
383	 * advance the %rip by one.
384	 */
385	INTR_POP
386	incq	(%rsp)
387	IRET
388	/*NOTREACHED*/
389
390ud_ret:
391	INTR_POP
392	pushq	%rax			/* push temp */
393	movq	32(%rsp), %rax		/* load %rsp */
394	movq	(%rax), %rax		/* load calling RIP */
395	movq	%rax, 8(%rsp)		/* store calling RIP */
396	addq	$8, 32(%rsp)		/* adjust new %rsp */
397	popq	%rax			/* pop off temp */
398	IRET				/* return from interrupt */
399	/*NOTREACHED*/
400
401ud_trap:
402	/*
403	 * We're going to let the kernel handle this as a normal #UD.  If,
404	 * however, we came through #BP and are spoofing #UD (in this case,
405	 * the stored error value will be non-zero), we need to de-spoof
406	 * the trap by incrementing %rip and pushing T_BPTFLT.
407	 */
408	cmpq	$0, REGOFF_ERR(%rsp)
409	je	ud_ud
410	incq	REGOFF_RIP(%rsp)
411	addq	$REGOFF_RIP, %rsp
412	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
413	jmp	cmntrap
414
415ud_ud:
416	addq	$REGOFF_RIP, %rsp
417ud_user:
418	NPTRAP_NOERR(T_ILLINST)
419	jmp	cmntrap
420	SET_SIZE(invoptrap)
421
422#elif defined(__i386)
423
424	/*
425	 * #UD -- invalid opcode (vector 6), i386 version
426	 */
427	ENTRY_NP(invoptrap)
428	/*
429	 * If we are taking an invalid opcode trap while in the kernel, this
430	 * is likely an FBT probe point.
431	 */
432	pushl   %gs
433	cmpw	$KGS_SEL, (%esp)	/* kernel %gs => trapped in kernel */
434	jne	8f
435
436	addl	$4, %esp
437	pusha
438	pushl	%eax			/* push %eax -- may be return value */
439	pushl	%esp			/* push stack pointer */
440	addl	$48, (%esp)		/* adjust to incoming args */
441	pushl	40(%esp)		/* push calling EIP */
442	call	dtrace_invop
443	ALTENTRY(dtrace_invop_callsite)
444	addl	$12, %esp
445	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
446	je	1f
447	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
448	je	2f
449	cmpl	$DTRACE_INVOP_LEAVE, %eax
450	je	3f
451	cmpl	$DTRACE_INVOP_NOP, %eax
452	je	4f
453	jmp	7f
4541:
455	/*
456	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
457	 * down 4 bytes, and then store the base pointer.
458	 */
459	popa
460	subl	$4, %esp		/* make room for %ebp */
461	pushl	%eax			/* push temp */
462	movl	8(%esp), %eax		/* load calling EIP */
463	incl	%eax			/* increment over LOCK prefix */
464	movl	%eax, 4(%esp)		/* store calling EIP */
465	movl	12(%esp), %eax		/* load calling CS */
466	movl	%eax, 8(%esp)		/* store calling CS */
467	movl	16(%esp), %eax		/* load calling EFLAGS */
468	movl	%eax, 12(%esp)		/* store calling EFLAGS */
469	movl	%ebp, 16(%esp)		/* push %ebp */
470	popl	%eax			/* pop off temp */
471	jmp	_emul_done
4722:
473	/*
474	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
475	 * the above:  we remove the %ebp from the stack, and squeeze up the
476	 * saved state from the trap.
477	 */
478	popa
479	pushl	%eax			/* push temp */
480	movl	16(%esp), %ebp		/* pop %ebp */
481	movl	12(%esp), %eax		/* load calling EFLAGS */
482	movl	%eax, 16(%esp)		/* store calling EFLAGS */
483	movl	8(%esp), %eax		/* load calling CS */
484	movl	%eax, 12(%esp)		/* store calling CS */
485	movl	4(%esp), %eax		/* load calling EIP */
486	incl	%eax			/* increment over LOCK prefix */
487	movl	%eax, 8(%esp)		/* store calling EIP */
488	popl	%eax			/* pop off temp */
489	addl	$4, %esp		/* adjust stack pointer */
490	jmp	_emul_done
4913:
492	/*
493	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
494	 * followed by a "popl %ebp".  This looks similar to the above, but
495	 * requires two temporaries:  one for the new base pointer, and one
496	 * for the staging register.
497	 */
498	popa
499	pushl	%eax			/* push temp */
500	pushl	%ebx			/* push temp */
501	movl	%ebp, %ebx		/* set temp to old %ebp */
502	movl	(%ebx), %ebp		/* pop %ebp */
503	movl	16(%esp), %eax		/* load calling EFLAGS */
504	movl	%eax, (%ebx)		/* store calling EFLAGS */
505	movl	12(%esp), %eax		/* load calling CS */
506	movl	%eax, -4(%ebx)		/* store calling CS */
507	movl	8(%esp), %eax		/* load calling EIP */
508	incl	%eax			/* increment over LOCK prefix */
509	movl	%eax, -8(%ebx)		/* store calling EIP */
510	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
511	popl	%ebx			/* pop off temp */
512	popl	%eax			/* pop off temp */
513	movl	-12(%esp), %esp		/* set stack pointer */
514	subl	$8, %esp		/* adjust for three pushes, one pop */
515	jmp	_emul_done
5164:
517	/*
518	 * We must emulate a "nop".  This is obviously not hard:  we need only
519	 * advance the %eip by one.
520	 */
521	popa
522	incl	(%esp)
523_emul_done:
524	IRET				/* return from interrupt */
5257:
	/* dtrace_invop did not recognize the instruction: normal #UD path */
526	popa
527	pushl	$0
528	pushl	$T_ILLINST	/* $6 */
529	jmp	cmntrap
5308:
	/* trapped in user mode: undo the %gs push, take a normal #UD */
531	addl	$4, %esp
532	pushl	$0
533	pushl	$T_ILLINST	/* $6 */
534	jmp	cmntrap
535	SET_SIZE(invoptrap)
536
537#endif	/* __i386 */
538
539#if defined(__amd64)
540
541	/*
542	 * #NM -- device-not-available (vector 7): lazy FPU context restore
543	 */
544	ENTRY_NP(ndptrap)
545	/*
546	 * We want to do this quickly as every lwp using fp will take this
547	 * after a context switch -- we do the frequent path in ndptrap_frstor
548	 * below; for all other cases, we let the trap code handle it
549	 */
550	pushq	%rax
551	pushq	%rbx
552	cmpw    $KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
553	jne     1f
554	LOADCPU(%rbx)			/* if yes, don't swapgs */
555	jmp	2f
5561:
557	SWAPGS				/* if from user, need swapgs */
558	LOADCPU(%rbx)
559	SWAPGS
5602:
561	cmpl	$0, fpu_exists(%rip)
562	je	.handle_in_trap		/* let trap handle no fp case */
563	movq	CPU_THREAD(%rbx), %rax	/* %rax = curthread */
564	movl	$FPU_EN, %ebx
565	movq	T_LWP(%rax), %rax	/* %rax = lwp */
566	testq	%rax, %rax
567	jz	.handle_in_trap		/* should not happen? */
568#if LWP_PCB_FPU	!= 0
569	addq	$LWP_PCB_FPU, %rax	/* &lwp->lwp_pcb.pcb_fpu */
570#endif
571	testl	%ebx, PCB_FPU_FLAGS(%rax)
572	jz	.handle_in_trap		/* must be the first fault */
573	clts				/* clear CR0.TS -- permit fp use */
574	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rax)
575#if FPU_CTX_FPU_REGS != 0
576	addq	$FPU_CTX_FPU_REGS, %rax
577#endif
578	/*
579	 * the label below is used in trap.c to detect FP faults in
580	 * kernel due to user fault.
581	 */
582	ALTENTRY(ndptrap_frstor)
583	fxrstor	(%rax)
584	popq	%rbx
585	popq	%rax
586	IRET
587	/*NOTREACHED*/
588
589.handle_in_trap:
590	popq	%rbx
591	popq	%rax
592	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
593	jmp	cmninttrap
594	SET_SIZE(ndptrap_frstor)
595	SET_SIZE(ndptrap)
596
597#elif defined(__i386)
598
/*
 * #NM -- device-not-available (vector 7), i386: lazy FPU context restore
 */
599	ENTRY_NP(ndptrap)
600	/*
601	 * We want to do this quickly as every lwp using fp will take this
602	 * after a context switch -- we do the frequent path in fpnoextflt
603	 * below; for all other cases, we let the trap code handle it
604	 */
605	pushl	%eax
606	pushl	%ebx
607	pushl	%ds
608	pushl	%gs
609	movl	$KDS_SEL, %ebx
610	movw	%bx, %ds
611	movl	$KGS_SEL, %eax
612	movw	%ax, %gs
613	LOADCPU(%eax)
614	cmpl	$0, fpu_exists
615	je	.handle_in_trap		/* let trap handle no fp case */
616	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
617	movl	$FPU_EN, %eax
618	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
619	testl	%ebx, %ebx
620	jz	.handle_in_trap		/* should not happen? */
621#if LWP_PCB_FPU != 0
622	addl	$LWP_PCB_FPU, %ebx 	/* &lwp->lwp_pcb.pcb_fpu */
623#endif
624	testl	%eax, PCB_FPU_FLAGS(%ebx)
625	jz	.handle_in_trap		/* must be the first fault */
626	CLTS
627	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
628#if FPU_CTX_FPU_REGS != 0
629	addl	$FPU_CTX_FPU_REGS, %ebx
630#endif
631	/*
632	 * the label below is used in trap.c to detect FP faults in kernel
633	 * due to user fault.
634	 */
635	ALTENTRY(ndptrap_frstor)
636	.globl	_patch_fxrstor_ebx
637_patch_fxrstor_ebx:
638	frstor	(%ebx)		/* may be patched to fxrstor */
639	nop			/* (including this byte) */
640	popl	%gs
641	popl	%ds
642	popl	%ebx
643	popl	%eax
644	IRET
645
646.handle_in_trap:
647	popl	%gs
648	popl	%ds
649	popl	%ebx
650	popl	%eax
651	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
652	jmp	cmninttrap
653	SET_SIZE(ndptrap_frstor)
654	SET_SIZE(ndptrap)
655
656#endif	/* __i386 */
657
658#if defined(__amd64)
659
660	/*
661	 * #DF -- double fault (vector 8); never returns, calls trap() to panic
662	 */
663	ENTRY_NP(syserrtrap)
664	pushq	$T_DBLFLT
665
666	SET_CPU_GSBASE
667
668	/*
669	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
670	 * have reached this point after encountering a #df in kmdb.  If that
671	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
672	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
673	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
674	 * entered from the panic flow.  We'll suppress that entry by setting
675	 * nopanicdebug.
676	 */
677	pushq	%rax
678	subq	$DESCTBR_SIZE, %rsp
679	sidt	(%rsp)			/* current IDTR to the stack */
680	movq	%gs:CPU_IDT, %rax
681	cmpq	%rax, DTR_BASE(%rsp)	/* already on this CPU's IDT? */
682	je	1f
683
684	movq	%rax, DTR_BASE(%rsp)
685	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
686	lidt	(%rsp)			/* switch back from kmdb's IDT */
687
688	movl	$1, nopanicdebug
689
6901:	addq	$DESCTBR_SIZE, %rsp
691	popq	%rax
692
693	DFTRAP_PUSH
694
695	/*
696	 * freeze trap trace.
697	 */
698#ifdef TRAPTRACE
699	leaq	trap_trace_freeze(%rip), %r11
700	incl	(%r11)
701#endif
702
703	ENABLE_INTR_FLAGS
704
705	movq	%rsp, %rdi	/* &regs */
706	xorl	%esi, %esi	/* clear address */
707	xorl	%edx, %edx	/* cpuid = 0 */
708	call	trap
709
710	SET_SIZE(syserrtrap)
711
712#elif defined(__i386)
713
714	/*
715	 * #DF -- double fault (vector 8), i386: handled via a task gate, so
716	 * we must manually rebuild a trap frame from the saved TSS state.
717	 */
717	ENTRY_NP(syserrtrap)
718	cli				/* disable interrupts */
719
720	/*
721	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
722	 * have reached this point after encountering a #df in kmdb.  If that
723	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
724	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
725	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
726	 * entered from the panic flow.  We'll suppress that entry by setting
727	 * nopanicdebug.
728	 */
729	subl	$DESCTBR_SIZE, %esp
730	movl	%gs:CPU_IDT, %eax
731	sidt	(%esp)
732	cmpl	DTR_BASE(%esp), %eax
733	je	1f
734
735	movl	%eax, DTR_BASE(%esp)
736	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
737	lidt	(%esp)
738
739	movl	$1, nopanicdebug
740
7411:	addl	$DESCTBR_SIZE, %esp
742
743	/*
744	 * Check the CPL in the TSS to see what mode
745	 * (user or kernel) we took the fault in.  At this
746	 * point we are running in the context of the double
747	 * fault task (dftss) but the CPU's task points to
748	 * the previous task (ktss) where the process context
749	 * has been saved as the result of the task switch.
750	 */
751	movl	%gs:CPU_TSS, %eax	/* get the TSS */
752	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
753	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
754	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
755	jz	make_frame
756	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
757	movl	TSS_ESP0(%eax), %esp
758
759	/*
760	 * Clear the NT flag to avoid a task switch when the process
761	 * finally pops the EFL off the stack via an iret.  Clear
762	 * the TF flag since that is what the processor does for
763	 * a normal exception. Clear the IE flag so that interrupts
764	 * remain disabled.
765	 */
766	movl	TSS_EFL(%eax), %ecx
767	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
768	pushl	%ecx
769	popfl				/* restore the EFL */
770	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
771	lldt	%cx
772
773	/*
774	 * Restore process segment selectors.
775	 */
776	movw	TSS_DS(%eax), %ds
777	movw	TSS_ES(%eax), %es
778	movw	TSS_FS(%eax), %fs
779	movw	TSS_GS(%eax), %gs
780
781	/*
782	 * Restore task segment selectors.
783	 */
784	movl	$KDS_SEL, TSS_DS(%eax)
785	movl	$KDS_SEL, TSS_ES(%eax)
786	movl	$KDS_SEL, TSS_SS(%eax)
787	movl	$KFS_SEL, TSS_FS(%eax)
788	movl	$KGS_SEL, TSS_GS(%eax)
789
790	/*
791	 * Clear the TS bit, the busy bits in both task
792	 * descriptors, and switch tasks.
793	 */
794	clts
795	leal	gdt0, %ecx
796	movl	DFTSS_SEL+4(%ecx), %esi
797	andl	$_BITNOT(0x200), %esi	/* 0x200 == TSS-busy bit in descriptor */
798	movl	%esi, DFTSS_SEL+4(%ecx)
799	movl	KTSS_SEL+4(%ecx), %esi
800	andl	$_BITNOT(0x200), %esi
801	movl	%esi, KTSS_SEL+4(%ecx)
802	movw	$KTSS_SEL, %cx
803	ltr	%cx
804
805	/*
806	 * Restore part of the process registers.
807	 */
808	movl	TSS_EBP(%eax), %ebp
809	movl	TSS_ECX(%eax), %ecx
810	movl	TSS_ESI(%eax), %esi
811	movl	TSS_EDI(%eax), %edi
812
813make_frame:
814	/*
815	 * Make a trap frame.  Leave the error code (0) on
816	 * the stack since the first word on a trap stack is
817	 * unused anyway.
818	 */
819	pushl	%ebx			/ fault SS
820	pushl	%edx			/ fault ESP
821	pushl	TSS_EFL(%eax)		/ fault EFL
822	pushl	TSS_CS(%eax)		/ fault CS
823	pushl	TSS_EIP(%eax)		/ fault EIP
824	pushl	$0			/ error code
825	pushl	$T_DBLFLT		/ trap number 8
826	movl	TSS_EBX(%eax), %ebx	/ restore EBX
827	movl	TSS_EDX(%eax), %edx	/ restore EDX
828	movl	TSS_EAX(%eax), %eax	/ restore EAX
829	sti				/ enable interrupts
830	jmp	cmntrap
831	SET_SIZE(syserrtrap)
832
833#endif	/* __i386 */
834
/*
 * Coprocessor segment overrun (vector 9) -- obsolete, i386 only.
 * NOTE(review): this explicit push $0 plus TRAP_NOERR() leaves three
 * words on the stack rather than the usual err+trapno pair; it is
 * harmless only because this vector is never generated on supported
 * hardware -- confirm the frame layout before ever reusing this path.
 */
835	ENTRY_NP(overrun)
836	push	$0
837	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
838	jmp	cmninttrap
839	SET_SIZE(overrun)
840
841	/*
842	 * #TS -- invalid TSS (vector 10); hw pushes an error code
843	 */
844	ENTRY_NP(invtsstrap)
845	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
846	jmp	cmntrap
847	SET_SIZE(invtsstrap)
848
849	/*
850	 * #NP -- segment not present (vector 11); hw pushes an error code
851	 */
852	ENTRY_NP(segnptrap)
853	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
854#if defined(__amd64)
855	SET_CPU_GSBASE
856#endif
857	jmp	cmntrap
858	SET_SIZE(segnptrap)
859
860	/*
861	 * #SS -- stack fault (vector 12); hw pushes an error code
862	 */
863	ENTRY_NP(stktrap)
864	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
865	jmp	cmntrap
866	SET_SIZE(stktrap)
867
868	/*
869	 * #GP -- general protection fault (vector 13); hw pushes an error code
870	 */
871	ENTRY_NP(gptrap)
872	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
873#if defined(__amd64)
874	SET_CPU_GSBASE
875#endif
876	jmp	cmntrap
877	SET_SIZE(gptrap)
878
879	/*
880	 * #PF -- page fault (vector 14); hw pushes an error code.
881	 * The faulting address (%cr2) is handed to cmntrap_pushed.
882	 */
882	ENTRY_NP(pftrap)
883	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
884	INTR_PUSH
885
886#if defined(__amd64)
887	movq	%cr2, %r15		/* fault address for cmntrap_pushed */
888#elif defined(__i386)
889	movl	%cr2, %esi		/* fault address for cmntrap_pushed */
890#endif	/* __i386 */
891
892	jmp	cmntrap_pushed
893	SET_SIZE(pftrap)
894
895#if !defined(__amd64)
896
897	.globl	idt0_default_r
898
899	/*
900	 * #PF pentium bug workaround: the F00F erratum makes a locked
901	 * illegal instruction report through the page-fault vector;
902	 * distinguish it by comparing %cr2 against the IDT page.
903	 */
902	ENTRY_NP(pentium_pftrap)
903	pushl	%eax
904	movl	%cr2, %eax
905	andl	$MMU_STD_PAGEMASK, %eax
906
907	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */
908
909	je	check_for_user_address
910user_mode:
911	popl	%eax
912	pushl	$T_PGFLT	/* $14 */
913	jmp	cmntrap
914check_for_user_address:
915	/*
916	 * Before we assume that we have an unmapped trap on our hands,
917	 * check to see if this is a fault from user mode.  If it is,
918	 * we'll kick back into the page fault handler.
919	 */
920	movl	4(%esp), %eax	/* error code */
921	andl	$PF_ERR_USER, %eax
922	jnz	user_mode
923
924	/*
925	 * We now know that this is the invalid opcode trap.
926	 */
927	popl	%eax
928	addl	$4, %esp	/* pop error code */
929	jmp	invoptrap
930	SET_SIZE(pentium_pftrap)
931
932#endif	/* !__amd64 */
933
/* vector 15 is architecturally reserved; should never fire */
934	ENTRY_NP(resvtrap)
935	TRAP_NOERR(15)		/* (reserved)  */
936	jmp	cmntrap
937	SET_SIZE(resvtrap)
938
939	/*
940	 * #MF -- x87 floating-point error (vector 16); no error code from hw
941	 */
942	ENTRY_NP(ndperr)
943	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
944	jmp	cmninttrap
945	SET_SIZE(ndperr)
946
947	/*
948	 * #AC -- alignment check (vector 17); hw pushes an error code
949	 */
950	ENTRY_NP(achktrap)
951	TRAP_ERR(T_ALIGNMENT)	/* $17 */
952	jmp	cmntrap
953	SET_SIZE(achktrap)
954
955	/*
956	 * #MC -- machine check (vector 18); no error code from hw
957	 */
958	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */
959
960#if defined(__amd64)
961
962	ENTRY_NP(mcetrap)
963	TRAP_NOERR(T_MCE)	/* $18 */
964
965	SET_CPU_GSBASE
966
967	INTR_PUSH
968	INTGATE_INIT_KERNEL_FLAGS
969
970	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
971	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
972	TRACE_STAMP(%rdi)
973
974	movq	%rsp, %rbp
975
976	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
977	call	cmi_mca_trap	/* cmi_mca_trap(rp); */
978
979	jmp	_sys_rtt
980	SET_SIZE(mcetrap)
981
982#else
983
984	ENTRY_NP(mcetrap)
985	TRAP_NOERR(T_MCE)	/* $18 */
986
987	INTR_PUSH
988	INTGATE_INIT_KERNEL_FLAGS
989
990	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
991	TRACE_REGS(%edi, %esp, %ebx, %ecx)
992	TRACE_STAMP(%edi)
993
994	movl	%esp, %ebp
995
996	movl	%esp, %ecx
997	pushl	%ecx		/* arg0 = struct regs *rp */
998	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
999	addl	$4, %esp	/* pop arg0 */
1000
1001	jmp	_sys_rtt
1002	SET_SIZE(mcetrap)
1003
1004#endif
1005
1006	/*
1007	 * #XF -- SIMD floating-point exception (vector 19); no error code
1008	 */
1009	ENTRY_NP(xmtrap)
1010	TRAP_NOERR(T_SIMDFPE)	/* $19 */
1011	jmp	cmntrap
1012	SET_SIZE(xmtrap)
1013
/* catch-all for unused exception vectors */
1014	ENTRY_NP(invaltrap)
1015	TRAP_NOERR(30)		/* very invalid */
1016	jmp	cmntrap
1017	SET_SIZE(invaltrap)
1018
/* catch-all for unused interrupt vectors */
1019	ENTRY_NP(invalint)
1020	TRAP_NOERR(31)		/* even more so */
1021	jmp	cmnint
1022	SET_SIZE(invalint)
1023
1024	.globl	fasttable
1025
1026#if defined(__amd64)
1027
/*
 * Fast system-call dispatch (int $T_FASTTRAP): index fasttable[] by the
 * trap subcode in %eax and jump to the handler.
 */
1028	ENTRY_NP(fasttrap)
1029	cmpl	$T_LASTFAST, %eax
1030	ja	1f
1031	orl	%eax, %eax	/* (zero extend top 32-bits) */
1032	leaq	fasttable(%rip), %r11
1033	leaq	(%r11, %rax, CLONGSIZE), %r11
1034	jmp	*(%r11)
10351:
1036	/*
1037	 * Fast syscall number was illegal.  Make it look
1038	 * as if the INT failed.  Modify %rip to point before the
1039	 * INT, push the expected error code and fake a GP fault.
1040	 *
1041	 * XXX Why make the error code be offset into idt + 1?
1042	 * Instead we should push a real (soft?) error code
1043	 * on the stack and #gp handler could know about fasttraps?
1044	 */
1045	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
1046	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1047	jmp	gptrap
1048	SET_SIZE(fasttrap)
1049
1050#elif defined(__i386)
1051
1052	ENTRY_NP(fasttrap)
1053	cmpl	$T_LASTFAST, %eax
1054	ja	1f
1055	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
10561:
1057	/*
1058	 * Fast syscall number was illegal.  Make it look
1059	 * as if the INT failed.  Modify %eip to point before the
1060	 * INT, push the expected error code and fake a GP fault.
1061	 *
1062	 * XXX Why make the error code be offset into idt + 1?
1063	 * Instead we should push a real (soft?) error code
1064	 * on the stack and #gp handler could know about fasttraps?
1065	 */
1066	subl	$2, (%esp)	/* XXX int insn 2-bytes */
1067	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1068	jmp	gptrap
1069	SET_SIZE(fasttrap)
1070
1071#endif	/* __i386 */
1072
/* DTrace pid-provider return probe entry */
1073	ENTRY_NP(dtrace_ret)
1074	TRAP_NOERR(T_DTRACE_RET)
1075	jmp	dtrace_trap
1076	SET_SIZE(dtrace_ret)
1077
1078#if defined(__amd64)
1079
1080	/*
1081	 * RFLAGS 24 bytes up the stack from %rsp.
1082	 * XXX a constant would be nicer.
1083	 */
/* null fast trap: just report failure by setting carry in user flags */
1084	ENTRY_NP(fast_null)
1085	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
1086	IRET
1087	/*NOTREACHED*/
1088	SET_SIZE(fast_null)
1089
1090#elif defined(__i386)
1091
1092	ENTRY_NP(fast_null)
1093	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
1094	IRET
1095	SET_SIZE(fast_null)
1096
1097#endif	/* __i386 */
1098
1099	/*
1100	 * Interrupts start at 32
1101	 */
/*
 * MKIVCT(n) emits the entry stub for hardware interrupt vector n:
 * push a dummy 0 error code and the vector rebased to IRQ space
 * (n - 0x20), then join the common interrupt path at cmnint.
 */
1102#define MKIVCT(n)			\
1103	ENTRY_NP(ivct/**/n)		\
1104	push	$0;			\
1105	push	$n - 0x20;		\
1106	jmp	cmnint;			\
1107	SET_SIZE(ivct/**/n)
1108
1109	MKIVCT(32)
1110	MKIVCT(33)
1111	MKIVCT(34)
1112	MKIVCT(35)
1113	MKIVCT(36)
1114	MKIVCT(37)
1115	MKIVCT(38)
1116	MKIVCT(39)
1117	MKIVCT(40)
1118	MKIVCT(41)
1119	MKIVCT(42)
1120	MKIVCT(43)
1121	MKIVCT(44)
1122	MKIVCT(45)
1123	MKIVCT(46)
1124	MKIVCT(47)
1125	MKIVCT(48)
1126	MKIVCT(49)
1127	MKIVCT(50)
1128	MKIVCT(51)
1129	MKIVCT(52)
1130	MKIVCT(53)
1131	MKIVCT(54)
1132	MKIVCT(55)
1133	MKIVCT(56)
1134	MKIVCT(57)
1135	MKIVCT(58)
1136	MKIVCT(59)
1137	MKIVCT(60)
1138	MKIVCT(61)
1139	MKIVCT(62)
1140	MKIVCT(63)
1141	MKIVCT(64)
1142	MKIVCT(65)
1143	MKIVCT(66)
1144	MKIVCT(67)
1145	MKIVCT(68)
1146	MKIVCT(69)
1147	MKIVCT(70)
1148	MKIVCT(71)
1149	MKIVCT(72)
1150	MKIVCT(73)
1151	MKIVCT(74)
1152	MKIVCT(75)
1153	MKIVCT(76)
1154	MKIVCT(77)
1155	MKIVCT(78)
1156	MKIVCT(79)
1157	MKIVCT(80)
1158	MKIVCT(81)
1159	MKIVCT(82)
1160	MKIVCT(83)
1161	MKIVCT(84)
1162	MKIVCT(85)
1163	MKIVCT(86)
1164	MKIVCT(87)
1165	MKIVCT(88)
1166	MKIVCT(89)
1167	MKIVCT(90)
1168	MKIVCT(91)
1169	MKIVCT(92)
1170	MKIVCT(93)
1171	MKIVCT(94)
1172	MKIVCT(95)
1173	MKIVCT(96)
1174	MKIVCT(97)
1175	MKIVCT(98)
1176	MKIVCT(99)
1177	MKIVCT(100)
1178	MKIVCT(101)
1179	MKIVCT(102)
1180	MKIVCT(103)
1181	MKIVCT(104)
1182	MKIVCT(105)
1183	MKIVCT(106)
1184	MKIVCT(107)
1185	MKIVCT(108)
1186	MKIVCT(109)
1187	MKIVCT(110)
1188	MKIVCT(111)
1189	MKIVCT(112)
1190	MKIVCT(113)
1191	MKIVCT(114)
1192	MKIVCT(115)
1193	MKIVCT(116)
1194	MKIVCT(117)
1195	MKIVCT(118)
1196	MKIVCT(119)
1197	MKIVCT(120)
1198	MKIVCT(121)
1199	MKIVCT(122)
1200	MKIVCT(123)
1201	MKIVCT(124)
1202	MKIVCT(125)
1203	MKIVCT(126)
1204	MKIVCT(127)
1205	MKIVCT(128)
1206	MKIVCT(129)
1207	MKIVCT(130)
1208	MKIVCT(131)
1209	MKIVCT(132)
1210	MKIVCT(133)
1211	MKIVCT(134)
1212	MKIVCT(135)
1213	MKIVCT(136)
1214	MKIVCT(137)
1215	MKIVCT(138)
1216	MKIVCT(139)
1217	MKIVCT(140)
1218	MKIVCT(141)
1219	MKIVCT(142)
1220	MKIVCT(143)
1221	MKIVCT(144)
1222	MKIVCT(145)
1223	MKIVCT(146)
1224	MKIVCT(147)
1225	MKIVCT(148)
1226	MKIVCT(149)
1227	MKIVCT(150)
1228	MKIVCT(151)
1229	MKIVCT(152)
1230	MKIVCT(153)
1231	MKIVCT(154)
1232	MKIVCT(155)
1233	MKIVCT(156)
1234	MKIVCT(157)
1235	MKIVCT(158)
1236	MKIVCT(159)
1237	MKIVCT(160)
1238	MKIVCT(161)
1239	MKIVCT(162)
1240	MKIVCT(163)
1241	MKIVCT(164)
1242	MKIVCT(165)
1243	MKIVCT(166)
1244	MKIVCT(167)
1245	MKIVCT(168)
1246	MKIVCT(169)
1247	MKIVCT(170)
1248	MKIVCT(171)
1249	MKIVCT(172)
1250	MKIVCT(173)
1251	MKIVCT(174)
1252	MKIVCT(175)
1253	MKIVCT(176)
1254	MKIVCT(177)
1255	MKIVCT(178)
1256	MKIVCT(179)
1257	MKIVCT(180)
1258	MKIVCT(181)
1259	MKIVCT(182)
1260	MKIVCT(183)
1261	MKIVCT(184)
1262	MKIVCT(185)
1263	MKIVCT(186)
1264	MKIVCT(187)
1265	MKIVCT(188)
1266	MKIVCT(189)
1267	MKIVCT(190)
1268	MKIVCT(191)
1269	MKIVCT(192)
1270	MKIVCT(193)
1271	MKIVCT(194)
1272	MKIVCT(195)
1273	MKIVCT(196)
1274	MKIVCT(197)
1275	MKIVCT(198)
1276	MKIVCT(199)
1277	MKIVCT(200)
1278	MKIVCT(201)
1279	MKIVCT(202)
1280	MKIVCT(203)
1281	MKIVCT(204)
1282	MKIVCT(205)
1283	MKIVCT(206)
1284	MKIVCT(207)
1285	MKIVCT(208)
1286	MKIVCT(209)
1287	MKIVCT(210)
1288	MKIVCT(211)
1289	MKIVCT(212)
1290	MKIVCT(213)
1291	MKIVCT(214)
1292	MKIVCT(215)
1293	MKIVCT(216)
1294	MKIVCT(217)
1295	MKIVCT(218)
1296	MKIVCT(219)
1297	MKIVCT(220)
1298	MKIVCT(221)
1299	MKIVCT(222)
1300	MKIVCT(223)
1301	MKIVCT(224)
1302	MKIVCT(225)
1303	MKIVCT(226)
1304	MKIVCT(227)
1305	MKIVCT(228)
1306	MKIVCT(229)
1307	MKIVCT(230)
1308	MKIVCT(231)
1309	MKIVCT(232)
1310	MKIVCT(233)
1311	MKIVCT(234)
1312	MKIVCT(235)
1313	MKIVCT(236)
1314	MKIVCT(237)
1315	MKIVCT(238)
1316	MKIVCT(239)
1317	MKIVCT(240)
1318	MKIVCT(241)
1319	MKIVCT(242)
1320	MKIVCT(243)
1321	MKIVCT(244)
1322	MKIVCT(245)
1323	MKIVCT(246)
1324	MKIVCT(247)
1325	MKIVCT(248)
1326	MKIVCT(249)
1327	MKIVCT(250)
1328	MKIVCT(251)
1329	MKIVCT(252)
1330	MKIVCT(253)
1331	MKIVCT(254)
1332	MKIVCT(255)
1333
1334#endif	/* __lint */
1335