xref: /titanic_41/usr/src/uts/intel/ia32/ml/exception.s (revision 3ee0e49223f178da635734759b9167f924321ff0)
1/*
2 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
3 * Use is subject to license terms.
4 */
5
6/*
7 * Copyright (c) 1989, 1990 William F. Jolitz.
8 * Copyright (c) 1990 The Regents of the University of California.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
40 */
41
42#pragma ident	"%Z%%M%	%I%	%E% SMI"
43
44#include <sys/asm_linkage.h>
45#include <sys/asm_misc.h>
46#include <sys/trap.h>
47#include <sys/psw.h>
48#include <sys/regset.h>
49#include <sys/privregs.h>
50#include <sys/dtrace.h>
51#include <sys/traptrace.h>
52#include <sys/machparam.h>
53
/*
 * only one routine in this file is interesting to lint
 */

#if defined(__lint)

/* lint-only stub; the real ndptrap_frstor is the asm ALTENTRY below */
void
ndptrap_frstor(void)
{}

#else

#include "assym.h"

/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 */
#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno
84
	/*
	 * #DE -- divide error
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)
92
#if defined(__amd64)
	/*
	 * #DB
	 *
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */
	ENTRY_NP(dbgtrap)
	pushq	%r11			/* scratch reg for the %rip compare */
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 8(%rsp)		/* trapping %rip == sys_sysenter? */
	jne	1f
	swapgs				/* yes: do the swapgs trap() expects */
1:	popq	%r11
	TRAP_NOERR(T_SGLSTP)	/* $1 */
	jmp	cmntrap
	SET_SIZE(dbgtrap)

#elif defined(__i386)
	/*
	 * #DB (no swapgs fixup needed on i386)
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */
	jmp	cmntrap
	SET_SIZE(dbgtrap)
#endif
127
#if defined(__amd64)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase else clear
 * gs and set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 */
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);		/* limit at 6(%rsp), base at 8 */ \
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
        mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
#endif	/* __amd64 */
180
181
182
183
	.globl	nmivect
	.globl	idt0_default_r

#if defined(__amd64)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH

	DISABLE_INTR_FLAGS		/* and set the kernel flags */

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)

	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi		/* arg0 = &regs */
	call	av_dispatch_nmivect

	INTR_POP
	iretq
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH

	/*
	 * setup pointer to reg struct as 2nd argument.
	 */
	movl	%esp, %ebp
	pushl	%ebp

	DISABLE_INTR_FLAGS

	/*
	 * Walk the NMI autovector chain, calling each registered handler.
	 */
	movl	nmivect, %esi		/* get autovect structure */
loop1:
	cmpl	$0, %esi		/* if pointer is null  */
	je	.intr_ret		/* 	we're done */
	movl	AV_VECTOR(%esi), %edx	/* get the interrupt routine */
	pushl	AV_INTARG1(%esi)	/* get argument to interrupt routine */
	call	*%edx			/* call interrupt routine with arg */
	addl	$4, %esp
	movl	AV_LINK(%esi), %esi	/* get next routine on list */
	jmp	loop1			/* keep looping until end of list */

.intr_ret:
	addl	$4, %esp		/* 'pop' %ebp */
	INTR_POP_USER
	iret
	SET_SIZE(nmiint)

#endif	/* __i386 */
259
	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)
#if defined(__amd64)
	cmpw	$KCS_SEL, 8(%rsp)	/* #BP from kernel mode? */
	je	bp_jmpud
#endif

	TRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

#if defined(__amd64)
bp_jmpud:
	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel
#endif

	SET_SIZE(brktrap)
288
	/*
	 * #OF -- overflow (INTO)
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)
296
	/*
	 * #BR -- BOUND range exceeded
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)
304
#if defined(__amd64)

	/*
	 * #UD -- invalid opcode.  A #UD taken in kernel mode is likely a
	 * DTrace (FBT) probe point: ask dtrace_invop() to emulate the
	 * overwritten instruction, otherwise fall through to a normal trap.
	 */
	ENTRY_NP(invoptrap)
	cmpw	$KCS_SEL, 8(%rsp)	/* trapped in kernel mode? */
	jne	ud_user

	push	$0			/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd			/* a dummy trap number */
	TRAP_PUSH
	movq	REGOFF_RIP(%rsp), %rdi	/* arg0 = trapping %rip */
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx	/* arg2 = interrupted %rax */
	pushq	(%rsi)			/* word at interrupted code's %rsp */
	movq	%rsp, %rsi		/* arg1 = stack pointer */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$8, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap			/* not a DTrace probe: real #UD */

ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	iretq

ud_ret:
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	TRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	TRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)
417
#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl   %gs
	cmpw	$KGS_SEL, (%esp)	/* were we using the kernel's %gs? */
	jne	8f			/* no: take a normal #UD */
	addl	$4, %esp		/* discard saved %gs */
	pusha
	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp		/* pop the three arguments */
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f			/* not a DTrace probe: real #UD */

1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	iret				/* return from interrupt */

2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above:  we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	iret				/* return from interrupt */

3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	iret				/* return from interrupt */

4:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
	iret

7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp		/* discard saved %gs */
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */
537
#if defined(__amd64)

	/*
	 * #NM
	 */
	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushq	%rax
	pushq	%rbx
	cmpw    $KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne     1f
	LOADCPU(%rbx)			/* if yes, don't swapgs */
	jmp	2f
1:
	swapgs				/* if from user, need swapgs */
	LOADCPU(%rbx)
	swapgs
2:
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rbx), %rax	/* %rax = curthread */
	movl	$FPU_EN, %ebx
	movq	T_LWP(%rax), %rax	/* %rax = lwp */
	testq	%rax, %rax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %rax	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%rax)
	jz	.handle_in_trap		/* must be the first fault */
	clts				/* clear CR0.TS to allow fp use */
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rax)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	fxrstor	(%rax)			/* restore the lwp's saved fp state */
	popq	%rbx
	popq	%rax
	iretq

.handle_in_trap:
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)
594
#elif defined(__i386)

	/*
	 * #NM
	 */
	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in fpnoextflt
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx		/* switch to kernel %ds/%gs so we */
	movw	%bx, %ds		/* can reach kernel data and cpu */
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%ebx)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%ebx), %eax	/* %eax = curthread */
	movl	$FPU_EN, %ebx
	movl	T_LWP(%eax), %eax	/* %eax = lwp */
	testl	%eax, %eax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %eax 	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%eax)
	jz	.handle_in_trap		/* must be the first fault */
	clts				/* clear CR0.TS to allow fp use */
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%eax)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %eax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_fxrstor_eax
_patch_fxrstor_eax:
	frstor	(%eax)		/* may be patched to fxrstor */
	nop			/* (including this byte) */
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	iret

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	pushl	$0
	pushl	$T_NOEXTFLT	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)
656
#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT		/* hw already pushed the error code */

	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)			/* examine the live IDT register */
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)	/* already on this CPU's IDT? */
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)			/* no: switch back to it */

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)
710
#elif defined(__i386)

	/*
	 * #DF -- reached via a task gate; process state lives in the
	 * previous task's TSS, from which we rebuild a normal trap frame.
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we may
	 * have reached this point after encountering a #df in kmdb.  If that
	 * happens, we'll still be on kmdb's IDT.  We need to switch back to this
	 * CPU's IDT before proceeding.  Furthermore, if we did arrive here from
	 * kmdb, kmdb is probably in a very sickly state, and shouldn't be
	 * entered from the panic flow.  We'll suppress that entry by setting
	 * nopanicdebug.
	 */
	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax	/* already on this CPU's IDT? */
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)			/* no: switch back to it */

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception. Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi	/* 0x200 = busy bit in TSS desc */
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)

#endif	/* __i386 */
833
834	ENTRY_NP(overrun)
835	push	$0
836	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
837	jmp	cmninttrap
838	SET_SIZE(overrun)
839
	/*
	 * #TS -- invalid TSS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)
847
	/*
	 * #NP -- segment not present
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)
858
	/*
	 * #SS -- stack-segment fault
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(stktrap)
866
	/*
	 * #GP -- general protection fault
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)
877
	/*
	 * #PF -- page fault
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(pftrap)
885
#if !defined(__amd64)

	/*
	 * #PF pentium bug workaround
	 *
	 * Compare the page-aligned faulting address against the IDT base
	 * (NOTE(review): idt0_default_r+2 presumably holds the base field
	 * of a stored IDT register image -- confirm).  A kernel-mode fault
	 * on the IDT page is treated as a spoofed invalid opcode trap and
	 * rerouted to invoptrap below.
	 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax	/* page-align the faulting address */

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)

#endif	/* !__amd64 */
922
	/* vector 15 is architecturally reserved */
	ENTRY_NP(resvtrap)
	TRAP_NOERR(15)		/* (reserved)  */
	jmp	cmntrap
	SET_SIZE(resvtrap)
927
	/*
	 * #MF -- x87 floating-point error
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)
935
	/*
	 * #AC -- alignment check
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)
943
	/*
	 * #MC -- machine check; hand a full regs frame to the CPU-module
	 * interface routine cmi_mca_trap().
	 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */
	SET_CPU_GSBASE
	INTR_PUSH

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	DISABLE_INTR_FLAGS
	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */
	INTR_PUSH

	DISABLE_INTR_FLAGS
	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif
987
	/*
	 * #XF -- SIMD floating-point exception
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmntrap
	SET_SIZE(xmtrap)
995
	/* catch-all for IDT slots that should never fire as traps */
	ENTRY_NP(invaltrap)
	TRAP_NOERR(30)		/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)
1000
	/* catch-all for IDT slots that should never fire as interrupts */
	ENTRY_NP(invalint)
	TRAP_NOERR(31)		/* even more so */
	jmp	cmnint
	SET_SIZE(invalint)
1005
	.globl	fasttable

#if defined(__amd64)

	/*
	 * Fast-trap dispatch: %eax selects an entry in fasttable.
	 */
	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */
1054
	/* push T_DTRACE_RET and join the shared DTrace trap path */
	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)
1059
#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	iretq
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	iret
	SET_SIZE(fast_null)

#endif	/* __i386 */
1079
	/*
	 * Interrupts start at 32
	 *
	 * MKIVCT generates one tiny entry point per hardware interrupt
	 * vector (0x20-0xff): push a dummy error code and the interrupt
	 * number relative to 0x20, then join the common interrupt path
	 * at cmnint.
	 */
#define MKIVCT(n)			\
	ENTRY_NP(ivct/**/n)		\
	push	$0;			\
	push	$n - 0x20;		\
	jmp	cmnint;			\
	SET_SIZE(ivct/**/n)

	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)

#endif	/* __lint */
1316