xref: /titanic_41/usr/src/uts/intel/ia32/ml/exception.s (revision 02e56f3f1bfc8d9977bafb8cb5202f576dcded27)
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/traptrace.h>

/*
 * only one routine in this file is interesting to lint
 */

#if defined(__lint)

void
ndptrap_frstor(void)
{}

#else

#include "assym.h"

/*
 * push $0 on the stack for traps that do not
 * generate an error code.  This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 */
#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno
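
/*
 * Illustrative note: on amd64, once either macro has run, the frame handed
 * to the common trap code looks roughly like this, from the top of the
 * stack down:
 *
 *	trap number		<- %rsp
 *	error code		(0 for TRAP_NOERR, hardware value for TRAP_ERR)
 *	%rip
 *	%cs
 *	%rflags
 *	%rsp
 *	%ss
 *
 * which is what lets the rest of the kernel treat every exception frame
 * identically.
 */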

	/*
	 * #DE
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)

#if defined(__amd64)
	/*
	 * #DB
	 *
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */
	ENTRY_NP(dbgtrap)
	pushq	%r11
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 8(%rsp)
	jne	1f
	swapgs
1:	popq	%r11
	TRAP_NOERR(T_SGLSTP)	/* $1 */
	jmp	cmntrap
	SET_SIZE(dbgtrap)

#elif defined(__i386)
	/*
	 * #DB
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */
	jmp	cmntrap
	SET_SIZE(dbgtrap)
#endif

#if defined(__amd64)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase; otherwise
 * clear gs and set gsbase.  We find the proper cpu struct by looping
 * through the cpu structs for all processors until we find a match for
 * the gdt of the trapping processor.  The stack is expected to be
 * pointing at the standard regs pushed by hardware on a trap (plus
 * error code and trapno).
 */
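
/*
 * Roughly equivalent C for the macro below (an illustrative sketch of the
 * control flow only):
 *
 *	sgdt(&gdtr);
 *	for (i = 0; i < NCPU; i++) {
 *		if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdtr.base)
 *			break;				-- found the trapping CPU
 *	}
 *	msr = (trapped_cs == KCS_SEL) ? MSR_AMD_GSBASE : MSR_AMD_KGSBASE;
 *	wrmsr(msr, (uint64_t)cpu[i]);			-- point gsbase/kgsbase at cpu[i]
 */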
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);						\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
	mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
#endif	/* __amd64 */

	.globl	nmivect
	.globl	idt0_default_r

#if defined(__amd64)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH

	DISABLE_INTR_FLAGS		/* and set the kernel flags */

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)

	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	iretq
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH

	/*
	 * setup pointer to reg struct as 2nd argument.
	 */
	movl	%esp, %ebp
	pushl	%ebp

	DISABLE_INTR_FLAGS

	movl	nmivect, %esi		/* get autovect structure */
loop1:
	cmpl	$0, %esi		/* if pointer is null  */
	je	.intr_ret		/* 	we're done */
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get argument to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$4, %esp
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	jmp	loop1			/* keep looping until end of list */

.intr_ret:
	addl	$4, %esp		/* 'pop' %ebp */
	INTR_POP_USER
	iret
	SET_SIZE(nmiint)

#endif	/* __i386 */

	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)
#if defined(__amd64)
	cmpw	$KCS_SEL, 8(%rsp)
	je	bp_jmpud
#endif

	TRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

#if defined(__amd64)
bp_jmpud:
	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel
#endif

	SET_SIZE(brktrap)

	/*
	 * #OF
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

	/*
	 * #BR
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)

#if defined(__amd64)

	ENTRY_NP(invoptrap)
	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

	push	$0			/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd			/* a dummy trap number */
	TRAP_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$8, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
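
	/*
	 * Illustrative summary of the emulation (a sketch, not a
	 * specification): the iretq frame is rebuilt so that
	 *
	 *	new %rip = old %rip + 1	(step over the 1-byte trapping instr)
	 *	new %rsp = old %rsp - 8	(make room for the emulated push)
	 *	*(new %rsp) = %rbp	(what "pushq %rbp" would have stored)
	 *
	 * with %cs, %rflags and %ss carried over unchanged, so the iretq
	 * below resumes the interrupted code as if the pushq had executed.
	 */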
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	iretq

ud_ret:
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
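
	/*
	 * In C-like form, the decision below is roughly (a sketch only):
	 *
	 *	if (rp->r_err == 0)
	 *		trap(T_ILLINST);	-- a genuine #UD
	 *	else {
	 *		rp->r_rip++;		-- undo the decq done in brktrap
	 *		trap(T_BPTFLT);		-- report the original #BP
	 *	}
	 */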
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	TRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	TRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)

#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl   %gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f
	addl	$4, %esp
	pusha
	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f

1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	iret				/* return from interrupt */

2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above:  we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	iret				/* return from interrupt */

3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	iret				/* return from interrupt */

4:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
	iret

7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */

#if defined(__amd64)

	/*
	 * #NM
	 */
	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
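
	/*
	 * Roughly, in C-like form, the fast path below does (an illustrative
	 * sketch; field names abbreviated):
	 *
	 *	cpu = CPU;			-- LOADCPU, swapgs'd if from user
	 *	if (!fpu_exists)
	 *		goto handle_in_trap;
	 *	lwp = ttolwp(cpu->cpu_thread);
	 *	if (lwp == NULL || !(lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN))
	 *		goto handle_in_trap;	-- first fault; let trap() set up fp
	 *	clts();
	 *	lwp->lwp_pcb.pcb_fpu.fpu_flags &= ~FPU_VALID;
	 *	fxrstor(&lwp->lwp_pcb.pcb_fpu.fpu_regs);
	 *	iretq;				-- done, no call into trap()
	 */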
	pushq	%rax
	pushq	%rbx
	cmpw    $KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne     1f
	LOADCPU(%rbx)			/* if yes, don't swapgs */
	jmp	2f
1:
	swapgs				/* if from user, need swapgs */
	LOADCPU(%rbx)
	swapgs
2:
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rbx), %rax	/* %rax = curthread */
	movl	$FPU_EN, %ebx
	movq	T_LWP(%rax), %rax	/* %rax = lwp */
	testq	%rax, %rax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %rax	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%rax)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rax)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	fxrstor	(%rax)
	popq	%rbx
	popq	%rax
	iretq

.handle_in_trap:
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#elif defined(__i386)

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in fpnoextflt
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx
	movw	%bx, %ds
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%ebx)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%ebx), %eax	/* %eax = curthread */
	movl	$FPU_EN, %ebx
	movl	T_LWP(%eax), %eax	/* %eax = lwp */
	testl	%eax, %eax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %eax	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%eax)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%eax)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %eax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_fxrstor_eax
_patch_fxrstor_eax:
	frstor	(%eax)		/* may be patched to fxrstor */
	nop			/* (including this byte) */
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	iret

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	pushl	$0
	pushl	$T_NOEXTFLT	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __i386 */

#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT

	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
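
	/*
	 * In outline (an illustrative sketch of the code below):
	 *
	 *	sidt(&dtr);
	 *	if (dtr.base != CPU->cpu_idt) {
	 *		dtr.base = CPU->cpu_idt;
	 *		dtr.limit = NIDT * GATE_DESC_SIZE;
	 *		lidt(&dtr);
	 *		nopanicdebug = 1;	-- don't re-enter a sick kmdb
	 *	}
	 */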
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)

#elif defined(__i386)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception. Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)

#endif	/* __i386 */

	ENTRY_NP(overrun)
	push	$0
	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
	jmp	cmninttrap
	SET_SIZE(overrun)

	/*
	 * #TS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

	/*
	 * #NP
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

	/*
	 * #SS
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(stktrap)

	/*
	 * #GP
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

	/*
	 * #PF
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(pftrap)

#if !defined(__amd64)

	/*
	 * #PF pentium bug workaround
	 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)

#endif	/* !__amd64 */

	ENTRY_NP(resvtrap)
	TRAP_NOERR(15)		/* (reserved)  */
	jmp	cmntrap
	SET_SIZE(resvtrap)

	/*
	 * #MF
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

	/*
	 * #AC
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

	/*
	 * #MC
	 */
	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmninttrap
	SET_SIZE(mcetrap)

	/*
	 * #XF
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmntrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(30)		/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)

	ENTRY_NP(invalint)
	TRAP_NOERR(31)		/* even more so */
	jmp	cmnint
	SET_SIZE(invalint)

	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
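
	/*
	 * Equivalently (an illustrative sketch of the three instructions
	 * below): back %rip up over the 2-byte INT instruction, push an
	 * error code that encodes the fasttrap IDT slot, and let the #GP
	 * handler deal with it:
	 *
	 *	rp->r_rip -= 2;
	 *	error = T_FASTTRAP * GATE_DESC_SIZE + 2;
	 *	gptrap();
	 */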
	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */

	ENTRY_NP(dtrace_fasttrap)
	TRAP_NOERR(T_DTRACE_PROBE)
	jmp	dtrace_trap
	SET_SIZE(dtrace_fasttrap)

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	iretq
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	iret
	SET_SIZE(fast_null)

#endif	/* __i386 */

	/*
	 * Interrupts start at 32
	 */
#define MKIVCT(n)			\
	ENTRY_NP(ivct/**/n)		\
	push	$0;			\
	push	$n - 0x20;		\
	jmp	cmnint;			\
	SET_SIZE(ivct/**/n)
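
/*
 * For illustration, MKIVCT(32) expands to (roughly):
 *
 *	ENTRY_NP(ivct32)
 *	push	$0
 *	push	$32 - 0x20	-- the assembler evaluates this to 0
 *	jmp	cmnint
 *	SET_SIZE(ivct32)
 *
 * i.e. each vector gets a tiny stub that pushes a zero "error code" and
 * its vector number relative to 0x20, then joins the common interrupt
 * path at cmnint.
 */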

	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)

#endif	/* __lint */