xref: /titanic_44/usr/src/uts/intel/ia32/ml/exception.s (revision 4812581794004eff0af2b765b832403b30bf64ab)
1/*
2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
4 */
5
6/*
7 * Copyright (c) 1989, 1990 William F. Jolitz.
8 * Copyright (c) 1990 The Regents of the University of California.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
40 */
41
42#include <sys/asm_linkage.h>
43#include <sys/asm_misc.h>
44#include <sys/trap.h>
45#include <sys/psw.h>
46#include <sys/regset.h>
47#include <sys/privregs.h>
48#include <sys/dtrace.h>
49#include <sys/x86_archext.h>
50#include <sys/traptrace.h>
51#include <sys/machparam.h>
52
53/*
54 * only one routine in this file is interesting to lint
55 */
56
57#if defined(__lint)
58
59void
60ndptrap_frstor(void)
61{}
62
63#else
64
65#include "assym.h"
66
67/*
68 * push $0 on stack for traps that do not
69 * generate an error code. This is so the rest
70 * of the kernel can expect a consistent stack
71 * from any exception.
72 *
73 * Note that for all amd64 exceptions taken under the hypervisor,
74 * %r11 and %rcx are on the stack.  Just pop
75 * them back into their appropriate registers and let
76 * the state get saved as it would when running native.
77 */
78
79#if defined(__xpv) && defined(__amd64)
80
81#define	NPTRAP_NOERR(trapno)	\
82	pushq	$0;		\
83	pushq	$trapno
84
85#define	TRAP_NOERR(trapno)	\
86	XPV_TRAP_POP;		\
87	NPTRAP_NOERR(trapno)
88
89/*
90 * error code already pushed by hw
91 * onto stack.
92 */
93#define	TRAP_ERR(trapno)	\
94	XPV_TRAP_POP;		\
95	pushq	$trapno
96
97#else /* __xpv && __amd64 */
98
99#define	TRAP_NOERR(trapno)	\
100	push	$0;		\
101	push	$trapno
102
103#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
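/* (On bare metal there is no hypervisor-pushed state to discard.) */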
104
105/*
106 * error code already pushed by hw
107 * onto stack.
108 */
109#define	TRAP_ERR(trapno)	\
110	push	$trapno
111
112#endif	/* __xpv && __amd64 */
113
114
115	/*
116	 * #DE
117	 */
118	ENTRY_NP(div0trap)
119	TRAP_NOERR(T_ZERODIV)	/* $0 */
120	jmp	cmntrap
121	SET_SIZE(div0trap)
122
123	/*
124	 * #DB
125	 *
126	 * Fetch %dr6 and clear it, handing off the value to the
127	 * cmntrap code in %r15/%esi
128	 */
129	ENTRY_NP(dbgtrap)
130	TRAP_NOERR(T_SGLSTP)	/* $1 */
131
132#if defined(__amd64)
133#if !defined(__xpv)		/* no sysenter support yet */
134	/*
135	 * If we get here as a result of single-stepping a sysenter
136	 * instruction, we suddenly find ourselves taking a #db
137	 * in kernel mode -before- we've swapgs'ed.  So before we can
138	 * take the trap, we do the swapgs here, and fix the return
139	 * %rip in trap() so that we return immediately after the
140	 * swapgs in the sysenter handler to avoid doing the swapgs again.
141	 *
142	 * Nobody said that the design of sysenter was particularly
143	 * elegant, did they?
144	 */
145
146	pushq	%r11
147
148	/*
149	 * At this point the stack looks like this:
150	 *
151	 * (high address) 	r_ss
152	 *			r_rsp
153	 *			r_rfl
154	 *			r_cs
155	 *			r_rip		<-- %rsp + 24
156	 *			r_err		<-- %rsp + 16
157	 *			r_trapno	<-- %rsp + 8
158	 * (low address)	%r11		<-- %rsp
159	 */
160	leaq	sys_sysenter(%rip), %r11
161	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
162	je	1f
163	leaq	brand_sys_sysenter(%rip), %r11
164	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
165	jne	2f
1661:	SWAPGS
1672:	popq	%r11
168#endif	/* !__xpv */
169
170	INTR_PUSH
171#if defined(__xpv)
172	movl	$6, %edi
173	call	kdi_dreg_get
174	movq	%rax, %r15		/* %db6 -> %r15 */
175	movl	$6, %edi
176	movl	$0, %esi
177	call	kdi_dreg_set		/* 0 -> %db6 */
178#else
179	movq	%db6, %r15
180	xorl	%eax, %eax
181	movq	%rax, %db6
182#endif
183
184#elif defined(__i386)
185
186	INTR_PUSH
187#if defined(__xpv)
188	pushl	$6
189	call	kdi_dreg_get
190	addl	$4, %esp
191	movl	%eax, %esi		/* %dr6 -> %esi */
192	pushl	$0
193	pushl	$6
194	call	kdi_dreg_set		/* 0 -> %dr6 */
195	addl	$8, %esp
196#else
197	movl	%db6, %esi
198	xorl	%eax, %eax
199	movl	%eax, %db6
200#endif
201#endif	/* __i386 */
202
203	jmp	cmntrap_pushed
204	SET_SIZE(dbgtrap)
205
206#if defined(__amd64)
207#if !defined(__xpv)
208
209/*
210 * Macro to set the gsbase or kgsbase to the address of the struct cpu
211 * for this processor.  If we came from userland, set kgsbase else
212 * set gsbase.  We find the proper cpu struct by looping through
213 * the cpu structs for all processors till we find a match for the gdt
214 * of the trapping processor.  The stack is expected to be pointing at
215 * the standard regs pushed by hardware on a trap (plus error code and trapno).
216 */
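/*
 * (These traps can be taken before the handler has had a chance to do
 * its swapgs, so the current %gs base cannot be trusted to locate this
 * CPU's cpu_t -- hence the GDT-matching search.)
 */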
217#define	SET_CPU_GSBASE							\
218	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
219	movq	%rax, REGOFF_RAX(%rsp);					\
220	movq	%rbx, REGOFF_RBX(%rsp);					\
221	movq	%rcx, REGOFF_RCX(%rsp);					\
222	movq	%rdx, REGOFF_RDX(%rsp);					\
223	movq	%rbp, REGOFF_RBP(%rsp);					\
224	movq	%rsp, %rbp;						\
225	subq	$16, %rsp;		/* space for gdt */		\
226	sgdt	6(%rsp);						\
227	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
228	xorl	%ebx, %ebx;		/* loop index */		\
229	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
2301:									\
231	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
232	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
233	je	2f;			/* yes, continue */		\
234	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
235	je	3f;			/* yes, go set gsbase */	\
2362:									\
237	incl	%ebx;			/* i++ */			\
238	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
239	jb	1b;			/* yes, loop */			\
240/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
2413:									\
242	movl	$MSR_AMD_KGSBASE, %ecx;					\
243	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
244	jne	4f;			/* no, go set KGSBASE */	\
245	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
246        mfence;				/* OPTERON_ERRATUM_88 */	\
2474:									\
248	movq	%rax, %rdx;		/* write base register */	\
249	shrq	$32, %rdx;						\
250	wrmsr;								\
251	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
252	movq	REGOFF_RCX(%rbp), %rcx;					\
253	movq	REGOFF_RBX(%rbp), %rbx;					\
254	movq	REGOFF_RAX(%rbp), %rax;					\
255	movq	%rbp, %rsp;						\
256	movq	REGOFF_RBP(%rsp), %rbp;					\
257	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
258
259#else	/* __xpv */
260
261#define	SET_CPU_GSBASE	/* noop on the hypervisor */
262
263#endif	/* __xpv */
264#endif	/* __amd64 */
265
266
267#if defined(__amd64)
268
269	/*
270	 * #NMI
271	 *
272	 * XXPV: See 6532669.
273	 */
274	ENTRY_NP(nmiint)
275	TRAP_NOERR(T_NMIFLT)	/* $2 */
276
277	SET_CPU_GSBASE
278
279	/*
280	 * Save all registers and setup segment registers
281	 * with kernel selectors.
282	 */
283	INTR_PUSH
284	INTGATE_INIT_KERNEL_FLAGS
285
286	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
287	TRACE_REGS(%r12, %rsp, %rax, %rbx)
288	TRACE_STAMP(%r12)
289
290	movq	%rsp, %rbp
291
292	movq	%rbp, %rdi
293	call	av_dispatch_nmivect
294
295	INTR_POP
296	IRET
297	/*NOTREACHED*/
298	SET_SIZE(nmiint)
299
300#elif defined(__i386)
301
302	/*
303	 * #NMI
304	 */
305	ENTRY_NP(nmiint)
306	TRAP_NOERR(T_NMIFLT)	/* $2 */
307
308	/*
309	 * Save all registers and setup segment registers
310	 * with kernel selectors.
311	 */
312	INTR_PUSH
313	INTGATE_INIT_KERNEL_FLAGS
314
315	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
316	TRACE_REGS(%edi, %esp, %ebx, %ecx)
317	TRACE_STAMP(%edi)
318
319	movl	%esp, %ebp
320
321	pushl	%ebp
322	call	av_dispatch_nmivect
323	addl	$4, %esp
324
325	INTR_POP_USER
326	IRET
327	SET_SIZE(nmiint)
328
329#endif	/* __i386 */
330
331	/*
332	 * #BP
333	 */
334	ENTRY_NP(brktrap)
335
336#if defined(__amd64)
337	XPV_TRAP_POP
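	/* with no error code pushed for #BP, the saved %cs is at 8(%rsp) */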
338	cmpw	$KCS_SEL, 8(%rsp)
339	jne	bp_user
340
341	/*
342	 * This is a breakpoint in the kernel -- it is very likely that this
343	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
344	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
345	 * we must decrement the trapping %rip to make it appear as a fault.
346	 * We then push a non-zero error code to indicate that this is coming
347	 * from #BP.
348	 */
349	decq	(%rsp)
350	push	$1			/* error code -- non-zero for #BP */
351	jmp	ud_kernel
352
353bp_user:
354#endif /* __amd64 */
355
356	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
357	jmp	dtrace_trap
358
359	SET_SIZE(brktrap)
360
361	/*
362	 * #OF
363	 */
364	ENTRY_NP(ovflotrap)
365	TRAP_NOERR(T_OVFLW)	/* $4 */
366	jmp	cmntrap
367	SET_SIZE(ovflotrap)
368
369	/*
370	 * #BR
371	 */
372	ENTRY_NP(boundstrap)
373	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
374	jmp	cmntrap
375	SET_SIZE(boundstrap)
376
377#if defined(__amd64)
378
379	ENTRY_NP(invoptrap)
380
381	XPV_TRAP_POP
382
383	cmpw	$KCS_SEL, 8(%rsp)
384	jne	ud_user
385
386#if defined(__xpv)
387	movb	$0, 12(%rsp)		/* clear saved upcall_mask from %cs */
388#endif
389	push	$0			/* error code -- zero for #UD */
390ud_kernel:
391	push	$0xdddd			/* a dummy trap number */
392	INTR_PUSH
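	/*
	 * Pass the faulting %rip, a pointer to a copy of the word at the
	 * interrupted stack pointer, and the saved %rax to dtrace_invop().
	 * The extra subq $8 (presumably to preserve stack alignment) and
	 * the pushed copy are both discarded by the addq $16 below.
	 */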
393	movq	REGOFF_RIP(%rsp), %rdi
394	movq	REGOFF_RSP(%rsp), %rsi
395	movq	REGOFF_RAX(%rsp), %rdx
396	pushq	(%rsi)
397	movq	%rsp, %rsi
398	subq	$8, %rsp
399	call	dtrace_invop
400	ALTENTRY(dtrace_invop_callsite)
401	addq	$16, %rsp
402	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
403	je	ud_push
404	cmpl	$DTRACE_INVOP_LEAVE, %eax
405	je	ud_leave
406	cmpl	$DTRACE_INVOP_NOP, %eax
407	je	ud_nop
408	cmpl	$DTRACE_INVOP_RET, %eax
409	je	ud_ret
410	jmp	ud_trap
411
412ud_push:
413	/*
414	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
415	 * down 8 bytes, and then store the base pointer.
416	 */
417	INTR_POP
418	subq	$16, %rsp		/* make room for %rbp */
419	pushq	%rax			/* push temp */
420	movq	24(%rsp), %rax		/* load calling RIP */
421	addq	$1, %rax		/* increment over trapping instr */
422	movq	%rax, 8(%rsp)		/* store calling RIP */
423	movq	32(%rsp), %rax		/* load calling CS */
424	movq	%rax, 16(%rsp)		/* store calling CS */
425	movq	40(%rsp), %rax		/* load calling RFLAGS */
426	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
427	movq	48(%rsp), %rax		/* load calling RSP */
428	subq	$8, %rax		/* make room for %rbp */
429	movq	%rax, 32(%rsp)		/* store calling RSP */
430	movq	56(%rsp), %rax		/* load calling SS */
431	movq	%rax, 40(%rsp)		/* store calling SS */
432	movq	32(%rsp), %rax		/* reload calling RSP */
433	movq	%rbp, (%rax)		/* store %rbp there */
434	popq	%rax			/* pop off temp */
435	IRET				/* return from interrupt */
436	/*NOTREACHED*/
437
438ud_leave:
439	/*
440	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
441	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
442	 * than it is on i386 -- we can exploit the fact that the %rsp is
443	 * explicitly saved to effect the pop without having to reshuffle
444	 * the other data pushed for the trap.
445	 */
446	INTR_POP
447	pushq	%rax			/* push temp */
448	movq	8(%rsp), %rax		/* load calling RIP */
449	addq	$1, %rax		/* increment over trapping instr */
450	movq	%rax, 8(%rsp)		/* store calling RIP */
451	movq	(%rbp), %rax		/* get new %rbp */
452	addq	$8, %rbp		/* adjust new %rsp */
453	movq	%rbp, 32(%rsp)		/* store new %rsp */
454	movq	%rax, %rbp		/* set new %rbp */
455	popq	%rax			/* pop off temp */
456	IRET				/* return from interrupt */
457	/*NOTREACHED*/
458
459ud_nop:
460	/*
461	 * We must emulate a "nop".  This is obviously not hard:  we need only
462	 * advance the %rip by one.
463	 */
464	INTR_POP
465	incq	(%rsp)
466	IRET
467	/*NOTREACHED*/
468
469ud_ret:
470	INTR_POP
471	pushq	%rax			/* push temp */
472	movq	32(%rsp), %rax		/* load %rsp */
473	movq	(%rax), %rax		/* load calling RIP */
474	movq	%rax, 8(%rsp)		/* store calling RIP */
475	addq	$8, 32(%rsp)		/* adjust new %rsp */
476	popq	%rax			/* pop off temp */
477	IRET				/* return from interrupt */
478	/*NOTREACHED*/
479
480ud_trap:
481	/*
482	 * We're going to let the kernel handle this as a normal #UD.  If,
483	 * however, we came through #BP and are spoofing #UD (in this case,
484	 * the stored error value will be non-zero), we need to de-spoof
485	 * the trap by incrementing %rip and pushing T_BPTFLT.
486	 */
487	cmpq	$0, REGOFF_ERR(%rsp)
488	je	ud_ud
489	incq	REGOFF_RIP(%rsp)
490	addq	$REGOFF_RIP, %rsp
491	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
492	jmp	cmntrap
493
494ud_ud:
495	addq	$REGOFF_RIP, %rsp
496ud_user:
497	NPTRAP_NOERR(T_ILLINST)
498	jmp	cmntrap
499	SET_SIZE(invoptrap)
500
501#elif defined(__i386)
502
503	/*
504	 * #UD
505	 */
506	ENTRY_NP(invoptrap)
507	/*
508	 * If we are taking an invalid opcode trap while in the kernel, this
509	 * is likely an FBT probe point.
510	 */
511	pushl   %gs
512	cmpw	$KGS_SEL, (%esp)
513	jne	8f
514
515	addl	$4, %esp
516#if defined(__xpv)
517	movb	$0, 6(%esp)		/* clear saved upcall_mask from %cs */
518#endif	/* __xpv */
519	pusha
520	pushl	%eax			/* push %eax -- may be return value */
521	pushl	%esp			/* push stack pointer */
522	addl	$48, (%esp)		/* adjust to incoming args */
523	pushl	40(%esp)		/* push calling EIP */
524	call	dtrace_invop
525	ALTENTRY(dtrace_invop_callsite)
526	addl	$12, %esp
527	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
528	je	1f
529	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
530	je	2f
531	cmpl	$DTRACE_INVOP_LEAVE, %eax
532	je	3f
533	cmpl	$DTRACE_INVOP_NOP, %eax
534	je	4f
535	jmp	7f
5361:
537	/*
538	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
539	 * down 4 bytes, and then store the base pointer.
540	 */
541	popa
542	subl	$4, %esp		/* make room for %ebp */
543	pushl	%eax			/* push temp */
544	movl	8(%esp), %eax		/* load calling EIP */
545	incl	%eax			/* increment over LOCK prefix */
546	movl	%eax, 4(%esp)		/* store calling EIP */
547	movl	12(%esp), %eax		/* load calling CS */
548	movl	%eax, 8(%esp)		/* store calling CS */
549	movl	16(%esp), %eax		/* load calling EFLAGS */
550	movl	%eax, 12(%esp)		/* store calling EFLAGS */
551	movl	%ebp, 16(%esp)		/* push %ebp */
552	popl	%eax			/* pop off temp */
553	jmp	_emul_done
5542:
555	/*
556	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
557	 * the above:  we remove the %ebp from the stack, and squeeze up the
558	 * saved state from the trap.
559	 */
560	popa
561	pushl	%eax			/* push temp */
562	movl	16(%esp), %ebp		/* pop %ebp */
563	movl	12(%esp), %eax		/* load calling EFLAGS */
564	movl	%eax, 16(%esp)		/* store calling EFLAGS */
565	movl	8(%esp), %eax		/* load calling CS */
566	movl	%eax, 12(%esp)		/* store calling CS */
567	movl	4(%esp), %eax		/* load calling EIP */
568	incl	%eax			/* increment over LOCK prefix */
569	movl	%eax, 8(%esp)		/* store calling EIP */
570	popl	%eax			/* pop off temp */
571	addl	$4, %esp		/* adjust stack pointer */
572	jmp	_emul_done
5733:
574	/*
575	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
576	 * followed by a "popl %ebp".  This looks similar to the above, but
577	 * requires two temporaries:  one for the new base pointer, and one
578	 * for the staging register.
579	 */
580	popa
581	pushl	%eax			/* push temp */
582	pushl	%ebx			/* push temp */
583	movl	%ebp, %ebx		/* set temp to old %ebp */
584	movl	(%ebx), %ebp		/* pop %ebp */
585	movl	16(%esp), %eax		/* load calling EFLAGS */
586	movl	%eax, (%ebx)		/* store calling EFLAGS */
587	movl	12(%esp), %eax		/* load calling CS */
588	movl	%eax, -4(%ebx)		/* store calling CS */
589	movl	8(%esp), %eax		/* load calling EIP */
590	incl	%eax			/* increment over LOCK prefix */
591	movl	%eax, -8(%ebx)		/* store calling EIP */
592	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
593	popl	%ebx			/* pop off temp */
594	popl	%eax			/* pop off temp */
595	movl	-12(%esp), %esp		/* set stack pointer */
596	subl	$8, %esp		/* adjust for three pushes, one pop */
597	jmp	_emul_done
5984:
599	/*
600	 * We must emulate a "nop".  This is obviously not hard:  we need only
601	 * advance the %eip by one.
602	 */
603	popa
604	incl	(%esp)
605_emul_done:
606	IRET				/* return from interrupt */
6077:
608	popa
609	pushl	$0
610	pushl	$T_ILLINST	/* $6 */
611	jmp	cmntrap
6128:
613	addl	$4, %esp
614	pushl	$0
615	pushl	$T_ILLINST	/* $6 */
616	jmp	cmntrap
617	SET_SIZE(invoptrap)
618
619#endif	/* __i386 */
620
621#if defined(__amd64)
622
623	/*
624	 * #NM
625	 */
626#if defined(__xpv)
627
628	ENTRY_NP(ndptrap)
629	/*
630	 * (On the hypervisor we must make a hypercall so we might as well
631	 * save everything and handle it as a normal trap.)
632	 */
633	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
634	INTR_PUSH
635
636	/*
637	 * We want this to be fast, since every lwp using the FPU takes this
638	 * trap after a context switch -- the frequent path is ndptrap_frstor
639	 * below; all other cases are left to the trap code to handle.
640	 */
641	LOADCPU(%rax)			/* swapgs handled in hypervisor */
642	cmpl	$0, fpu_exists(%rip)
643	je	.handle_in_trap		/* let trap handle no fp case */
644	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
645	movl	$FPU_EN, %eax
646	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
647	testq	%rbx, %rbx
648	jz	.handle_in_trap		/* should not happen? */
649#if LWP_PCB_FPU	!= 0
650	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
651#endif
652	testl	%eax, PCB_FPU_FLAGS(%rbx)
653	jz	.handle_in_trap		/* must be the first fault */
654	CLTS
655	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
656#if FPU_CTX_FPU_REGS != 0
657	addq	$FPU_CTX_FPU_REGS, %rbx
658#endif
659
660	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
661	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */
662
663	/*
664	 * The label below is used in trap.c to detect FP faults in the
665	 * kernel that are due to a user fault.
666	 */
667	ALTENTRY(ndptrap_frstor)
668	.globl  _patch_xrstorq_rbx
669_patch_xrstorq_rbx:
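	/* may be patched to use xrstorq; %edx:%eax carry the xsave mask loaded above */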
670	FXRSTORQ	((%rbx))
671	cmpw	$KCS_SEL, REGOFF_CS(%rsp)
672	je	.return_to_kernel
673
674	ASSERT_UPCALL_MASK_IS_SET
675	USER_POP
676	IRET				/* return to user mode */
677	/*NOTREACHED*/
678
679.return_to_kernel:
680	INTR_POP
681	IRET
682	/*NOTREACHED*/
683
684.handle_in_trap:
685	INTR_POP
686	pushq	$0			/* can not use TRAP_NOERR */
687	pushq	$T_NOEXTFLT
688	jmp	cmninttrap
689	SET_SIZE(ndptrap_frstor)
690	SET_SIZE(ndptrap)
691
692#else	/* __xpv */
693
694	ENTRY_NP(ndptrap)
695	/*
696	 * We want this to be fast, since every lwp using the FPU takes this
697	 * trap after a context switch -- the frequent path is ndptrap_frstor
698	 * below; all other cases are left to the trap code to handle.
699	 */
700	pushq	%rax
701	pushq	%rbx
702	cmpw    $KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
703	jne     1f
704	LOADCPU(%rax)			/* if yes, don't swapgs */
705	jmp	2f
7061:
707	SWAPGS				/* if from user, need swapgs */
708	LOADCPU(%rax)
709	SWAPGS
7102:
711	/*
712	 * xrstor uses %edx (together with %eax) as its feature mask.
713	 * NOTE: %rdx must be pushed after the "cmpw ... 24(%rsp)" above;
714	 * otherwise %rsp + 24 would no longer point to the saved CS.
715	 */
716	pushq	%rdx
717	cmpl	$0, fpu_exists(%rip)
718	je	.handle_in_trap		/* let trap handle no fp case */
719	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
720	movl	$FPU_EN, %eax
721	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
722	testq	%rbx, %rbx
723	jz	.handle_in_trap		/* should not happen? */
724#if LWP_PCB_FPU	!= 0
725	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
726#endif
727	testl	%eax, PCB_FPU_FLAGS(%rbx)
728	jz	.handle_in_trap		/* must be the first fault */
729	clts
730	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
731#if FPU_CTX_FPU_REGS != 0
732	addq	$FPU_CTX_FPU_REGS, %rbx
733#endif
734
735	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
736	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */
737
738	/*
739	 * The label below is used in trap.c to detect FP faults in the
740	 * kernel that are due to a user fault.
741	 */
742	ALTENTRY(ndptrap_frstor)
743	.globl  _patch_xrstorq_rbx
744_patch_xrstorq_rbx:
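	/* may be patched to use xrstorq; %edx:%eax carry the xsave mask loaded above */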
745	FXRSTORQ	((%rbx))
746	popq	%rdx
747	popq	%rbx
748	popq	%rax
749	IRET
750	/*NOTREACHED*/
751
752.handle_in_trap:
753	popq	%rdx
754	popq	%rbx
755	popq	%rax
756	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
757	jmp	cmninttrap
758	SET_SIZE(ndptrap_frstor)
759	SET_SIZE(ndptrap)
760
761#endif	/* __xpv */
762
763#elif defined(__i386)
764
765	ENTRY_NP(ndptrap)
766	/*
767	 * We want this to be fast, since every lwp using the FPU takes this
768	 * trap after a context switch -- the frequent path is ndptrap_frstor
769	 * below; all other cases are left to the trap code to handle.
770	 */
771	pushl	%eax
772	pushl	%ebx
773	pushl	%edx			/* for xrstor */
774	pushl	%ds
775	pushl	%gs
776	movl	$KDS_SEL, %ebx
777	movw	%bx, %ds
778	movl	$KGS_SEL, %eax
779	movw	%ax, %gs
780	LOADCPU(%eax)
781	cmpl	$0, fpu_exists
782	je	.handle_in_trap		/* let trap handle no fp case */
783	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
784	movl	$FPU_EN, %eax
785	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
786	testl	%ebx, %ebx
787	jz	.handle_in_trap		/* should not happen? */
788#if LWP_PCB_FPU != 0
789	addl	$LWP_PCB_FPU, %ebx 	/* &lwp->lwp_pcb.pcb_fpu */
790#endif
791	testl	%eax, PCB_FPU_FLAGS(%ebx)
792	jz	.handle_in_trap		/* must be the first fault */
793	CLTS
794	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
795#if FPU_CTX_FPU_REGS != 0
796	addl	$FPU_CTX_FPU_REGS, %ebx
797#endif
798
799	movl	FPU_CTX_FPU_XSAVE_MASK(%ebx), %eax	/* for xrstor */
800	movl	FPU_CTX_FPU_XSAVE_MASK+4(%ebx), %edx	/* for xrstor */
801
802	/*
803	 * The label below is used in trap.c to detect FP faults in the
804	 * kernel that are due to a user fault.
805	 */
806	ALTENTRY(ndptrap_frstor)
807	.globl  _patch_fxrstor_ebx
808_patch_fxrstor_ebx:
809	.globl  _patch_xrstor_ebx
810_patch_xrstor_ebx:
811	frstor	(%ebx)		/* may be patched to fxrstor */
812	nop			/* (including this byte) */
813	popl	%gs
814	popl	%ds
815	popl	%edx
816	popl	%ebx
817	popl	%eax
818	IRET
819
820.handle_in_trap:
821	popl	%gs
822	popl	%ds
823	popl	%edx
824	popl	%ebx
825	popl	%eax
826	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
827	jmp	cmninttrap
828	SET_SIZE(ndptrap_frstor)
829	SET_SIZE(ndptrap)
830
831#endif	/* __i386 */
832
833#if !defined(__xpv)
834#if defined(__amd64)
835
836	/*
837	 * #DF
838	 */
839	ENTRY_NP(syserrtrap)
840	pushq	$T_DBLFLT
841	SET_CPU_GSBASE
842
843	/*
844	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
845	 * may have reached this point after encountering a #df in kmdb.  If
846	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
847	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
848	 * here from kmdb, kmdb is probably in a very sickly state, and
849	 * shouldn't be entered from the panic flow.  We'll suppress that
850	 * entry by setting nopanicdebug.
851	 */
852	pushq	%rax
853	subq	$DESCTBR_SIZE, %rsp
854	sidt	(%rsp)
855	movq	%gs:CPU_IDT, %rax
856	cmpq	%rax, DTR_BASE(%rsp)
857	je	1f
858
859	movq	%rax, DTR_BASE(%rsp)
860	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
861	lidt	(%rsp)
862
863	movl	$1, nopanicdebug
864
8651:	addq	$DESCTBR_SIZE, %rsp
866	popq	%rax
867
868	DFTRAP_PUSH
869
870	/*
871	 * freeze trap trace.
872	 */
873#ifdef TRAPTRACE
874	leaq	trap_trace_freeze(%rip), %r11
875	incl	(%r11)
876#endif
877
878	ENABLE_INTR_FLAGS
879
880	movq	%rsp, %rdi	/* &regs */
881	xorl	%esi, %esi	/* clear address */
882	xorl	%edx, %edx	/* cpuid = 0 */
883	call	trap
884
885	SET_SIZE(syserrtrap)
886
887#elif defined(__i386)
888
889	/*
890	 * #DF
891	 */
892	ENTRY_NP(syserrtrap)
893	cli				/* disable interrupts */
894
895	/*
896	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
897	 * may have reached this point after encountering a #df in kmdb.  If
898	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
899	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
900	 * here from kmdb, kmdb is probably in a very sickly state, and
901	 * shouldn't be entered from the panic flow.  We'll suppress that
902	 * entry by setting nopanicdebug.
903	 */
904
905	subl	$DESCTBR_SIZE, %esp
906	movl	%gs:CPU_IDT, %eax
907	sidt	(%esp)
908	cmpl	DTR_BASE(%esp), %eax
909	je	1f
910
911	movl	%eax, DTR_BASE(%esp)
912	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
913	lidt	(%esp)
914
915	movl	$1, nopanicdebug
916
9171:	addl	$DESCTBR_SIZE, %esp
918
919	/*
920	 * Check the CPL in the TSS to see what mode
921	 * (user or kernel) we took the fault in.  At this
922	 * point we are running in the context of the double
923	 * fault task (dftss) but the CPU's task points to
924	 * the previous task (ktss) where the process context
925	 * has been saved as the result of the task switch.
926	 */
927	movl	%gs:CPU_TSS, %eax	/* get the TSS */
928	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
929	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
930	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
931	jz	make_frame
932	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
933	movl	TSS_ESP0(%eax), %esp
934
935	/*
936	 * Clear the NT flag to avoid a task switch when the process
937	 * finally pops the EFL off the stack via an iret.  Clear
938	 * the TF flag since that is what the processor does for
939	 * a normal exception. Clear the IE flag so that interrupts
940	 * remain disabled.
941	 */
942	movl	TSS_EFL(%eax), %ecx
943	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
944	pushl	%ecx
945	popfl				/* restore the EFL */
946	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
947	lldt	%cx
948
949	/*
950	 * Restore process segment selectors.
951	 */
952	movw	TSS_DS(%eax), %ds
953	movw	TSS_ES(%eax), %es
954	movw	TSS_FS(%eax), %fs
955	movw	TSS_GS(%eax), %gs
956
957	 * Reset the segment selector slots in the TSS to the kernel selectors.
958	 * Restore task segment selectors.
959	 */
960	movl	$KDS_SEL, TSS_DS(%eax)
961	movl	$KDS_SEL, TSS_ES(%eax)
962	movl	$KDS_SEL, TSS_SS(%eax)
963	movl	$KFS_SEL, TSS_FS(%eax)
964	movl	$KGS_SEL, TSS_GS(%eax)
965
966	/*
967	 * Clear the TS bit, the busy bits in both task
968	 * descriptors, and switch tasks.
969	 */
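	/*
	 * (0x200 is the busy bit -- bit 9 of the high longword of a TSS
	 * descriptor, i.e. bit 1 of its type field.)
	 */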
970	clts
971	leal	gdt0, %ecx
972	movl	DFTSS_SEL+4(%ecx), %esi
973	andl	$_BITNOT(0x200), %esi
974	movl	%esi, DFTSS_SEL+4(%ecx)
975	movl	KTSS_SEL+4(%ecx), %esi
976	andl	$_BITNOT(0x200), %esi
977	movl	%esi, KTSS_SEL+4(%ecx)
978	movw	$KTSS_SEL, %cx
979	ltr	%cx
980
981	/*
982	 * Restore part of the process registers.
983	 */
984	movl	TSS_EBP(%eax), %ebp
985	movl	TSS_ECX(%eax), %ecx
986	movl	TSS_ESI(%eax), %esi
987	movl	TSS_EDI(%eax), %edi
988
989make_frame:
990	/*
991	 * Make a trap frame.  Leave the error code (0) on
992	 * the stack since the first word on a trap stack is
993	 * unused anyway.
994	 */
995	pushl	%ebx			/ fault SS
996	pushl	%edx			/ fault ESP
997	pushl	TSS_EFL(%eax)		/ fault EFL
998	pushl	TSS_CS(%eax)		/ fault CS
999	pushl	TSS_EIP(%eax)		/ fault EIP
1000	pushl	$0			/ error code
1001	pushl	$T_DBLFLT		/ trap number 8
1002	movl	TSS_EBX(%eax), %ebx	/ restore EBX
1003	movl	TSS_EDX(%eax), %edx	/ restore EDX
1004	movl	TSS_EAX(%eax), %eax	/ restore EAX
1005	sti				/ enable interrupts
1006	jmp	cmntrap
1007	SET_SIZE(syserrtrap)
1008
1009#endif	/* __i386 */
1010#endif	/* !__xpv */
1011
1012	ENTRY_NP(overrun)
1013	push	$0
1014	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
1015	jmp	cmninttrap
1016	SET_SIZE(overrun)
1017
1018	/*
1019	 * #TS
1020	 */
1021	ENTRY_NP(invtsstrap)
1022	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
1023	jmp	cmntrap
1024	SET_SIZE(invtsstrap)
1025
1026	/*
1027	 * #NP
1028	 */
1029	ENTRY_NP(segnptrap)
1030	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
1031#if defined(__amd64)
1032	SET_CPU_GSBASE
1033#endif
1034	jmp	cmntrap
1035	SET_SIZE(segnptrap)
1036
1037	/*
1038	 * #SS
1039	 */
1040	ENTRY_NP(stktrap)
1041	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
1042	jmp	cmntrap
1043	SET_SIZE(stktrap)
1044
1045	/*
1046	 * #GP
1047	 */
1048	ENTRY_NP(gptrap)
1049	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
1050#if defined(__amd64)
1051	SET_CPU_GSBASE
1052#endif
1053	jmp	cmntrap
1054	SET_SIZE(gptrap)
1055
1056	/*
1057	 * #PF
1058	 */
1059	ENTRY_NP(pftrap)
1060	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
1061	INTR_PUSH
1062#if defined(__xpv)
1063
1064#if defined(__amd64)
1065	movq	%gs:CPU_VCPU_INFO, %r15
1066	movq	VCPU_INFO_ARCH_CR2(%r15), %r15	/* vcpu[].arch.cr2 */
1067#elif defined(__i386)
1068	movl	%gs:CPU_VCPU_INFO, %esi
1069	movl	VCPU_INFO_ARCH_CR2(%esi), %esi	/* vcpu[].arch.cr2 */
1070#endif	/* __i386 */
1071
1072#else	/* __xpv */
1073
1074#if defined(__amd64)
1075	movq	%cr2, %r15
1076#elif defined(__i386)
1077	movl	%cr2, %esi
1078#endif	/* __i386 */
1079
1080#endif	/* __xpv */
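	/*
	 * cmntrap_pushed expects the faulting address in %r15 (amd64) or
	 * %esi (i386), as loaded above.
	 */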
1081	jmp	cmntrap_pushed
1082	SET_SIZE(pftrap)
1083
1084#if !defined(__amd64)
1085
1086	.globl	idt0_default_r
1087
1088	/*
1089	 * #PF pentium bug workaround
1090	 */
1091	ENTRY_NP(pentium_pftrap)
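	/*
	 * Pentium "F00F" erratum workaround: the IDT is mapped so that the
	 * errant instruction produces a #PF referencing the IDT instead of
	 * locking up the processor.  If the faulting page matches the IDT
	 * base recorded in idt0_default_r, this is such a fault; otherwise
	 * it is handled as an ordinary page fault.
	 */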
1092	pushl	%eax
1093	movl	%cr2, %eax
1094	andl	$MMU_STD_PAGEMASK, %eax
1095
1096	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */
1097
1098	je	check_for_user_address
1099user_mode:
1100	popl	%eax
1101	pushl	$T_PGFLT	/* $14 */
1102	jmp	cmntrap
1103check_for_user_address:
1104	/*
1105	 * Before we assume that we have an unmapped trap on our hands,
1106	 * check to see if this is a fault from user mode.  If it is,
1107	 * we'll kick back into the page fault handler.
1108	 */
1109	movl	4(%esp), %eax	/* error code */
1110	andl	$PF_ERR_USER, %eax
1111	jnz	user_mode
1112
1113	/*
1114	 * We now know that this is the invalid opcode trap.
1115	 */
1116	popl	%eax
1117	addl	$4, %esp	/* pop error code */
1118	jmp	invoptrap
1119	SET_SIZE(pentium_pftrap)
1120
1121#endif	/* !__amd64 */
1122
1123	ENTRY_NP(resvtrap)
1124	TRAP_NOERR(15)		/* (reserved)  */
1125	jmp	cmntrap
1126	SET_SIZE(resvtrap)
1127
1128	/*
1129	 * #MF
1130	 */
1131	ENTRY_NP(ndperr)
1132	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
1133	jmp	cmninttrap
1134	SET_SIZE(ndperr)
1135
1136	/*
1137	 * #AC
1138	 */
1139	ENTRY_NP(achktrap)
1140	TRAP_ERR(T_ALIGNMENT)	/* $17 */
1141	jmp	cmntrap
1142	SET_SIZE(achktrap)
1143
1144	/*
1145	 * #MC
1146	 */
1147	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */
1148
1149#if defined(__amd64)
1150
1151	ENTRY_NP(mcetrap)
1152	TRAP_NOERR(T_MCE)	/* $18 */
1153
1154	SET_CPU_GSBASE
1155
1156	INTR_PUSH
1157	INTGATE_INIT_KERNEL_FLAGS
1158
1159	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
1160	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
1161	TRACE_STAMP(%rdi)
1162
1163	movq	%rsp, %rbp
1164
1165	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
1166	call	cmi_mca_trap	/* cmi_mca_trap(rp); */
1167
1168	jmp	_sys_rtt
1169	SET_SIZE(mcetrap)
1170
1171#else
1172
1173	ENTRY_NP(mcetrap)
1174	TRAP_NOERR(T_MCE)	/* $18 */
1175
1176	INTR_PUSH
1177	INTGATE_INIT_KERNEL_FLAGS
1178
1179	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
1180	TRACE_REGS(%edi, %esp, %ebx, %ecx)
1181	TRACE_STAMP(%edi)
1182
1183	movl	%esp, %ebp
1184
1185	movl	%esp, %ecx
1186	pushl	%ecx		/* arg0 = struct regs *rp */
1187	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
1188	addl	$4, %esp	/* pop arg0 */
1189
1190	jmp	_sys_rtt
1191	SET_SIZE(mcetrap)
1192
1193#endif
1194
1195	/*
1196	 * #XF
1197	 */
1198	ENTRY_NP(xmtrap)
1199	TRAP_NOERR(T_SIMDFPE)	/* $19 */
1200	jmp	cmninttrap
1201	SET_SIZE(xmtrap)
1202
1203	ENTRY_NP(invaltrap)
1204	TRAP_NOERR(30)		/* very invalid */
1205	jmp	cmntrap
1206	SET_SIZE(invaltrap)
1207
1208	ENTRY_NP(invalint)
1209	TRAP_NOERR(31)		/* even more so */
1210	jmp	cmnint
1211	SET_SIZE(invalint)
1212
1213	.globl	fasttable
1214
1215#if defined(__amd64)
1216
1217	ENTRY_NP(fasttrap)
1218	cmpl	$T_LASTFAST, %eax
1219	ja	1f
1220	orl	%eax, %eax	/* (zero extend top 32-bits) */
1221	leaq	fasttable(%rip), %r11
1222	leaq	(%r11, %rax, CLONGSIZE), %r11
1223	jmp	*(%r11)
12241:
1225	/*
1226	 * Fast syscall number was illegal.  Make it look
1227	 * as if the INT failed.  Modify %rip to point before the
1228	 * INT, push the expected error code and fake a GP fault.
1229	 *
1230	 * XXX Why make the error code be offset into idt + 1?
1231	 * Instead we should push a real (soft?) error code
1232	 * on the stack so the #gp handler could know about fasttraps?
1233	 */
1234	XPV_TRAP_POP
1235
1236	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
1237	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1238
1239#if defined(__xpv)
1240	pushq	%r11
1241	pushq	%rcx
1242#endif
1243	jmp	gptrap
1244	SET_SIZE(fasttrap)
1245
1246#elif defined(__i386)
1247
1248	ENTRY_NP(fasttrap)
1249	cmpl	$T_LASTFAST, %eax
1250	ja	1f
1251	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
12521:
1253	/*
1254	 * Fast syscall number was illegal.  Make it look
1255	 * as if the INT failed.  Modify %eip to point before the
1256	 * INT, push the expected error code and fake a GP fault.
1257	 *
1258	 * XXX Why make the error code be offset into idt + 1?
1259	 * Instead we should push a real (soft?) error code
1260	 * on the stack so the #gp handler could know about fasttraps?
1261	 */
1262	subl	$2, (%esp)	/* XXX int insn 2-bytes */
1263	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1264	jmp	gptrap
1265	SET_SIZE(fasttrap)
1266
1267#endif	/* __i386 */
1268
1269	ENTRY_NP(dtrace_ret)
1270	TRAP_NOERR(T_DTRACE_RET)
1271	jmp	dtrace_trap
1272	SET_SIZE(dtrace_ret)
1273
1274#if defined(__amd64)
1275
1276	/*
1277	 * RFLAGS is 24 bytes up the stack from %rsp.
1278	 * XXX a constant would be nicer.
1279	 */
1280	ENTRY_NP(fast_null)
1281	XPV_TRAP_POP
1282	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
1283	IRET
1284	/*NOTREACHED*/
1285	SET_SIZE(fast_null)
1286
1287#elif defined(__i386)
1288
1289	ENTRY_NP(fast_null)
1290	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
1291	IRET
1292	SET_SIZE(fast_null)
1293
1294#endif	/* __i386 */
1295
1296	/*
1297	 * Interrupts start at 32
1298	 */
1299#define MKIVCT(n)			\
1300	ENTRY_NP(ivct/**/n)		\
1301	push	$0;			\
1302	push	$n - 0x20;		\
1303	jmp	cmnint;			\
1304	SET_SIZE(ivct/**/n)
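/*
 * Each MKIVCT(n) entry point pushes a zero in the error-code slot and
 * the vector number less 0x20 (the base of the external interrupt
 * range), then joins the common interrupt path at cmnint.
 */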
1305
1306	MKIVCT(32)
1307	MKIVCT(33)
1308	MKIVCT(34)
1309	MKIVCT(35)
1310	MKIVCT(36)
1311	MKIVCT(37)
1312	MKIVCT(38)
1313	MKIVCT(39)
1314	MKIVCT(40)
1315	MKIVCT(41)
1316	MKIVCT(42)
1317	MKIVCT(43)
1318	MKIVCT(44)
1319	MKIVCT(45)
1320	MKIVCT(46)
1321	MKIVCT(47)
1322	MKIVCT(48)
1323	MKIVCT(49)
1324	MKIVCT(50)
1325	MKIVCT(51)
1326	MKIVCT(52)
1327	MKIVCT(53)
1328	MKIVCT(54)
1329	MKIVCT(55)
1330	MKIVCT(56)
1331	MKIVCT(57)
1332	MKIVCT(58)
1333	MKIVCT(59)
1334	MKIVCT(60)
1335	MKIVCT(61)
1336	MKIVCT(62)
1337	MKIVCT(63)
1338	MKIVCT(64)
1339	MKIVCT(65)
1340	MKIVCT(66)
1341	MKIVCT(67)
1342	MKIVCT(68)
1343	MKIVCT(69)
1344	MKIVCT(70)
1345	MKIVCT(71)
1346	MKIVCT(72)
1347	MKIVCT(73)
1348	MKIVCT(74)
1349	MKIVCT(75)
1350	MKIVCT(76)
1351	MKIVCT(77)
1352	MKIVCT(78)
1353	MKIVCT(79)
1354	MKIVCT(80)
1355	MKIVCT(81)
1356	MKIVCT(82)
1357	MKIVCT(83)
1358	MKIVCT(84)
1359	MKIVCT(85)
1360	MKIVCT(86)
1361	MKIVCT(87)
1362	MKIVCT(88)
1363	MKIVCT(89)
1364	MKIVCT(90)
1365	MKIVCT(91)
1366	MKIVCT(92)
1367	MKIVCT(93)
1368	MKIVCT(94)
1369	MKIVCT(95)
1370	MKIVCT(96)
1371	MKIVCT(97)
1372	MKIVCT(98)
1373	MKIVCT(99)
1374	MKIVCT(100)
1375	MKIVCT(101)
1376	MKIVCT(102)
1377	MKIVCT(103)
1378	MKIVCT(104)
1379	MKIVCT(105)
1380	MKIVCT(106)
1381	MKIVCT(107)
1382	MKIVCT(108)
1383	MKIVCT(109)
1384	MKIVCT(110)
1385	MKIVCT(111)
1386	MKIVCT(112)
1387	MKIVCT(113)
1388	MKIVCT(114)
1389	MKIVCT(115)
1390	MKIVCT(116)
1391	MKIVCT(117)
1392	MKIVCT(118)
1393	MKIVCT(119)
1394	MKIVCT(120)
1395	MKIVCT(121)
1396	MKIVCT(122)
1397	MKIVCT(123)
1398	MKIVCT(124)
1399	MKIVCT(125)
1400	MKIVCT(126)
1401	MKIVCT(127)
1402	MKIVCT(128)
1403	MKIVCT(129)
1404	MKIVCT(130)
1405	MKIVCT(131)
1406	MKIVCT(132)
1407	MKIVCT(133)
1408	MKIVCT(134)
1409	MKIVCT(135)
1410	MKIVCT(136)
1411	MKIVCT(137)
1412	MKIVCT(138)
1413	MKIVCT(139)
1414	MKIVCT(140)
1415	MKIVCT(141)
1416	MKIVCT(142)
1417	MKIVCT(143)
1418	MKIVCT(144)
1419	MKIVCT(145)
1420	MKIVCT(146)
1421	MKIVCT(147)
1422	MKIVCT(148)
1423	MKIVCT(149)
1424	MKIVCT(150)
1425	MKIVCT(151)
1426	MKIVCT(152)
1427	MKIVCT(153)
1428	MKIVCT(154)
1429	MKIVCT(155)
1430	MKIVCT(156)
1431	MKIVCT(157)
1432	MKIVCT(158)
1433	MKIVCT(159)
1434	MKIVCT(160)
1435	MKIVCT(161)
1436	MKIVCT(162)
1437	MKIVCT(163)
1438	MKIVCT(164)
1439	MKIVCT(165)
1440	MKIVCT(166)
1441	MKIVCT(167)
1442	MKIVCT(168)
1443	MKIVCT(169)
1444	MKIVCT(170)
1445	MKIVCT(171)
1446	MKIVCT(172)
1447	MKIVCT(173)
1448	MKIVCT(174)
1449	MKIVCT(175)
1450	MKIVCT(176)
1451	MKIVCT(177)
1452	MKIVCT(178)
1453	MKIVCT(179)
1454	MKIVCT(180)
1455	MKIVCT(181)
1456	MKIVCT(182)
1457	MKIVCT(183)
1458	MKIVCT(184)
1459	MKIVCT(185)
1460	MKIVCT(186)
1461	MKIVCT(187)
1462	MKIVCT(188)
1463	MKIVCT(189)
1464	MKIVCT(190)
1465	MKIVCT(191)
1466	MKIVCT(192)
1467	MKIVCT(193)
1468	MKIVCT(194)
1469	MKIVCT(195)
1470	MKIVCT(196)
1471	MKIVCT(197)
1472	MKIVCT(198)
1473	MKIVCT(199)
1474	MKIVCT(200)
1475	MKIVCT(201)
1476	MKIVCT(202)
1477	MKIVCT(203)
1478	MKIVCT(204)
1479	MKIVCT(205)
1480	MKIVCT(206)
1481	MKIVCT(207)
1482	MKIVCT(208)
1483	MKIVCT(209)
1484	MKIVCT(210)
1485	MKIVCT(211)
1486	MKIVCT(212)
1487	MKIVCT(213)
1488	MKIVCT(214)
1489	MKIVCT(215)
1490	MKIVCT(216)
1491	MKIVCT(217)
1492	MKIVCT(218)
1493	MKIVCT(219)
1494	MKIVCT(220)
1495	MKIVCT(221)
1496	MKIVCT(222)
1497	MKIVCT(223)
1498	MKIVCT(224)
1499	MKIVCT(225)
1500	MKIVCT(226)
1501	MKIVCT(227)
1502	MKIVCT(228)
1503	MKIVCT(229)
1504	MKIVCT(230)
1505	MKIVCT(231)
1506	MKIVCT(232)
1507	MKIVCT(233)
1508	MKIVCT(234)
1509	MKIVCT(235)
1510	MKIVCT(236)
1511	MKIVCT(237)
1512	MKIVCT(238)
1513	MKIVCT(239)
1514	MKIVCT(240)
1515	MKIVCT(241)
1516	MKIVCT(242)
1517	MKIVCT(243)
1518	MKIVCT(244)
1519	MKIVCT(245)
1520	MKIVCT(246)
1521	MKIVCT(247)
1522	MKIVCT(248)
1523	MKIVCT(249)
1524	MKIVCT(250)
1525	MKIVCT(251)
1526	MKIVCT(252)
1527	MKIVCT(253)
1528	MKIVCT(254)
1529	MKIVCT(255)
1530
1531#endif	/* __lint */
1532