/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

/*
 * only one routine in this file is interesting to lint
 */

#if defined(__lint)

void
ndptrap_frstor(void)
{}

#else

#include "assym.h"

/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 */

#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno


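/*
 * For reference: after either macro, the top of the stack has the same
 * shape.  A sketch (not authoritative, highest address first):
 *
 *	[ss, rsp/esp]	only if the CPU switched stacks
 *	rflags/eflags
 *	cs
 *	rip/eip
 *	error code	hardware-supplied, or the $0 pushed above
 *	trap number	<-- %rsp/%esp
 *
 * which is the layout cmntrap and friends expect on entry.
 */
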
	/*
	 * #DE
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)

	/*
	 * #DB
	 *
	 * Fetch %dr6 and clear it, handing off the value to the
	 * cmntrap code in %r15/%esi
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */

#if defined(__amd64)
	/*
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */

	pushq	%r11

	/*
	 * At this point the stack looks like this:
	 *
	 * (high address) 	r_ss
	 *			r_rsp
	 *			r_rfl
	 *			r_cs
	 *			r_rip		<-- %rsp + 24
	 *			r_err		<-- %rsp + 16
	 *			r_trapno	<-- %rsp + 8
	 * (low address)	%r11		<-- %rsp
	 */
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	jne	1f
	SWAPGS
1:	popq	%r11

	INTR_PUSH
	movq	%db6, %r15
	xorl	%eax, %eax
	movq	%rax, %db6

#elif defined(__i386)

	INTR_PUSH
	movl	%db6, %esi
	xorl	%eax, %eax
	movl	%eax, %db6
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(dbgtrap)

#if defined(__amd64)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase else
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 */
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);						\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
        mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */

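/*
 * Roughly, SET_CPU_GSBASE does the following in C (a sketch only; the
 * real macro must preserve the registers it uses and runs before %gs
 * can be trusted):
 *
 *	sgdt(&gdtr);
 *	for (i = 0; i < NCPU; i++) {
 *		if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdtr.base)
 *			break;
 *	}
 *	wrmsr(trapped_in_kernel ? MSR_AMD_GSBASE : MSR_AMD_KGSBASE,
 *	    (uint64_t)cpu[i]);
 */
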
#endif	/* __amd64 */


#if defined(__amd64)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	IRET
	/*NOTREACHED*/
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	pushl	%ebp
	call	av_dispatch_nmivect
	addl	$4, %esp

	INTR_POP_USER
	IRET
	SET_SIZE(nmiint)

#endif	/* __i386 */

	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)

#if defined(__amd64)
	cmpw	$KCS_SEL, 8(%rsp)
	jne	bp_user

	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel

bp_user:
#endif /* __amd64 */

	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

	SET_SIZE(brktrap)
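
	/*
	 * In outline, the spoofing above and its undoing in ud_trap
	 * (a sketch; see invoptrap below for the details):
	 *
	 *	#BP from kernel:	%rip--, push error code 1, join ud_kernel
	 *	dtrace_invop() declines:	if the error code is non-zero,
	 *				%rip++ and report T_BPTFLT
	 *
	 * so a kernel breakpoint that DTrace does not claim is still seen
	 * by trap() as an ordinary #BP.
	 */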

	/*
	 * #OF
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

	/*
	 * #BR
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)

#if defined(__amd64)

	ENTRY_NP(invoptrap)

	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

	push	$0			/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd			/* a dummy trap number */
	INTR_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$8, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	IRET
	/*NOTREACHED*/

ud_ret:
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	NPTRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)
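
	/*
	 * To summarize the plumbing above (a sketch, not an authoritative
	 * statement of the interface): dtrace_invop() is asked whether the
	 * trapping instruction is one that the FBT provider overwrote.  If
	 * it returns one of DTRACE_INVOP_{PUSHL_EBP,LEAVE,NOP,RET}, we
	 * emulate that single instruction by hand-editing the saved iret
	 * frame (and, for push/ret, the interrupted stack) and iret straight
	 * back without ever entering trap().  Any other return value falls
	 * through to ud_trap and normal #UD handling.
	 */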

#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl   %gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f

	addl	$4, %esp
	pusha
	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f
1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	jmp	_emul_done
2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above:  we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	jmp	_emul_done
3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	jmp	_emul_done
4:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
_emul_done:
	IRET				/* return from interrupt */
7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */

#if defined(__amd64)

	/*
	 * #NM
	 */
	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushq	%rax
	pushq	%rbx
	cmpw    $KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne     1f
	LOADCPU(%rbx)			/* if yes, don't swapgs */
	jmp	2f
1:
	SWAPGS				/* if from user, need swapgs */
	LOADCPU(%rbx)
	SWAPGS
2:
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rbx), %rax	/* %rax = curthread */
	movl	$FPU_EN, %ebx
	movq	T_LWP(%rax), %rax	/* %rax = lwp */
	testq	%rax, %rax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %rax	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%rax)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rax)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	fxrstor	(%rax)
	popq	%rbx
	popq	%rax
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#elif defined(__i386)

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx
	movw	%bx, %ds
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%eax)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
	movl	$FPU_EN, %eax
	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
	testl	%ebx, %ebx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %ebx 	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%ebx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %ebx
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_fxrstor_ebx
_patch_fxrstor_ebx:
	frstor	(%ebx)		/* may be patched to fxrstor */
	nop			/* (including this byte) */
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	IRET

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __i386 */
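
/*
 * In both ndptrap variants above, the fast path is roughly the following
 * C (a sketch only; names are inferred from the offsets used above, and
 * the amd64 swapgs/LOADCPU dance is elided):
 *
 *	if (fpu_exists && curthread->t_lwp != NULL &&
 *	    (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)) {
 *		clts();				let fp instructions run
 *		fpu_flags &= ~FPU_VALID;	live state is now in hardware
 *		fxrstor(&pcb_fpu.fpu_regs);	(frstor on older i386 parts)
 *		return;				iret; trap() never runs
 *	}
 *	otherwise re-pop the temporaries and take T_NOEXTFLT via cmninttrap
 */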

#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT

	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)
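
	/*
	 * The IDT check above is, in rough C (a sketch; dtr_base/dtr_limit
	 * stand in for whatever the descriptor-table register layout calls
	 * its fields):
	 *
	 *	sidt(&idtr);
	 *	if (idtr.dtr_base != CPU->cpu_idt) {
	 *		idtr.dtr_base = (uintptr_t)CPU->cpu_idt;
	 *		idtr.dtr_limit = NIDT * GATE_DESC_SIZE;
	 *		lidt(&idtr);
	 *		nopanicdebug = 1;	keep panic from re-entering kmdb
	 *	}
	 */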

#elif defined(__i386)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception. Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)
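
	/*
	 * To recap the i386 path above (descriptive only): #DF arrives via
	 * a task gate, so the faulting context lives in the kernel TSS
	 * rather than on a stack.  The code above gets back onto the kernel
	 * stack, sanitizes EFLAGS and the segment/task state, clears the
	 * busy bits so the TSSes can be reused, and then hand-builds an
	 * ordinary trap frame from the TSS image so the fault can be
	 * handled by cmntrap like any other trap.
	 */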

#endif	/* __i386 */

	ENTRY_NP(overrun)
	push	$0
	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
	jmp	cmninttrap
	SET_SIZE(overrun)

	/*
	 * #TS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

	/*
	 * #NP
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

	/*
	 * #SS
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(stktrap)

	/*
	 * #GP
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

	/*
	 * #PF
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	INTR_PUSH

#if defined(__amd64)
	movq	%cr2, %r15
#elif defined(__i386)
	movl	%cr2, %esi
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(pftrap)

#if !defined(__amd64)

	.globl	idt0_default_r

	/*
	 * #PF pentium bug workaround
	 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)
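
	/*
	 * A note on the above (not from the original comments, but this is
	 * how the well-known workaround operates): on Pentium parts with
	 * the "F00F" erratum the IDT is mapped so that the otherwise-fatal
	 * lockup becomes a #PF whose faulting address lands on the IDT
	 * page.  When %cr2 matches the IDT base and the fault did not come
	 * from user mode, the "page fault" is really the offending invalid
	 * opcode, so it is rerouted to invoptrap; everything else is
	 * treated as an ordinary page fault.
	 */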

#endif	/* !__amd64 */

	ENTRY_NP(resvtrap)
	TRAP_NOERR(15)		/* (reserved)  */
	jmp	cmntrap
	SET_SIZE(resvtrap)

	/*
	 * #MF
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

	/*
	 * #AC
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

	/*
	 * #MC
	 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	SET_CPU_GSBASE

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif

	/*
	 * #XF
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmntrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(30)		/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)

	ENTRY_NP(invalint)
	TRAP_NOERR(31)		/* even more so */
	jmp	cmnint
	SET_SIZE(invalint)

	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */
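
/*
 * In rough C, both fasttrap variants above amount to (a sketch):
 *
 *	if (%eax <= T_LASTFAST)
 *		goto *fasttable[%eax];		dispatch the fast trap
 *	else
 *		back %rip/%eip up over the 2-byte int instruction, push an
 *		IDT-style error code for vector T_FASTTRAP, and deliver it
 *		as a #GP via gptrap.
 */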

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	IRET
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */

	/*
	 * Interrupts start at 32
	 */
#define MKIVCT(n)			\
	ENTRY_NP(ivct/**/n)		\
	push	$0;			\
	push	$n - 0x20;		\
	jmp	cmnint;			\
	SET_SIZE(ivct/**/n)

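	/*
	 * For example, MKIVCT(40) expands to roughly (a sketch):
	 *
	 *	ENTRY_NP(ivct40)
	 *	push	$0		dummy error code
	 *	push	$8		vector 40, biased by the 0x20 base
	 *	jmp	cmnint
	 *	SET_SIZE(ivct40)
	 */
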
	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)

#endif	/* __lint */
