/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

/*
 * only one routine in this file is interesting to lint
 */

#if defined(__lint)

void
ndptrap_frstor(void)
{}

#else

#include "assym.h"

/*
 * push $0 on stack for traps that do not
 * generate an error code. This is so the rest
 * of the kernel can expect a consistent stack
 * from any exception.
 *
 * Note that for all exceptions for amd64,
 * %r11 and %rcx are on the stack. Just pop
 * them back into their appropriate registers and let
 * the frame be saved as if running native.
 */

#if defined(__xpv) && defined(__amd64)

#define	NPTRAP_NOERR(trapno)	\
	pushq	$0;		\
	pushq	$trapno

#define	TRAP_NOERR(trapno)	\
	XPV_TRAP_POP;		\
	NPTRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	XPV_TRAP_POP;		\
	pushq	$trapno

#else /* __xpv && __amd64 */

#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno

#endif	/* __xpv && __amd64 */

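/*
 * For reference (derived from the macros above), the stack that the
 * common trap code then sees looks like:
 *
 * (high address)	<iret frame pushed by hardware>
 *			error code (pushed by hw, or $0 from the macro)
 * (low address)	trap number
 */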

	/*
	 * #DE
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)

	/*
	 * #DB
	 *
	 * Fetch %dr6 and clear it, handing off the value to the
	 * cmntrap code in %r15/%esi
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */

#if defined(__amd64)
#if !defined(__xpv)		/* no sysenter support yet */
	/*
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */

	pushq	%r11

	/*
	 * At this point the stack looks like this:
	 *
	 * (high address) 	r_ss
	 *			r_rsp
	 *			r_rfl
	 *			r_cs
	 *			r_rip		<-- %rsp + 24
	 *			r_err		<-- %rsp + 16
	 *			r_trapno	<-- %rsp + 8
	 * (low address)	%r11		<-- %rsp
	 */
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	jne	1f
	SWAPGS
1:	popq	%r11
#endif	/* !__xpv */

	INTR_PUSH
#if defined(__xpv)
	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, %r15		/* %db6 -> %r15 */
	movl	$6, %edi
	movl	$0, %esi
	call	kdi_dreg_set		/* 0 -> %db6 */
#else
	movq	%db6, %r15
	xorl	%eax, %eax
	movq	%rax, %db6
#endif

#elif defined(__i386)

	INTR_PUSH
#if defined(__xpv)
	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, %esi		/* %dr6 -> %esi */
	pushl	$0
	pushl	$6
	call	kdi_dreg_set		/* 0 -> %dr6 */
	addl	$8, %esp
#else
	movl	%db6, %esi
	xorl	%eax, %eax
	movl	%eax, %db6
#endif
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(dbgtrap)

#if defined(__amd64)
#if !defined(__xpv)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase; otherwise
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors until we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 */
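/*
 * In rough pseudocode, the macro below does the following ("this_gdt"
 * is read with sgdt; "cpu" is the kernel's array of cpu_t pointers):
 *
 *	for (i = 0; i < NCPU; i++)
 *		if (cpu[i] != NULL && gdt of cpu[i] == this_gdt)
 *			break;
 *	msr = (trapped from kernel) ? MSR_AMD_GSBASE : MSR_AMD_KGSBASE;
 *	wrmsr(msr, (uint64_t)cpu[i]);
 *
 * wrmsr takes the MSR index in %ecx and the 64-bit value split across
 * %edx:%eax, hence the shrq by 32 before the write.
 */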
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);						\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
	mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */

#else	/* __xpv */

#define	SET_CPU_GSBASE	/* noop on the hypervisor */

#endif	/* __xpv */
#endif	/* __amd64 */


#if defined(__amd64)

	/*
	 * #NMI
	 *
	 * XXPV: See 6532669.
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	IRET
	/*NOTREACHED*/
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	pushl	%ebp
	call	av_dispatch_nmivect
	addl	$4, %esp

	INTR_POP_USER
	IRET
	SET_SIZE(nmiint)

#endif	/* __i386 */

	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)

#if defined(__amd64)
	XPV_TRAP_POP
	cmpw	$KCS_SEL, 8(%rsp)
	jne	bp_user

	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel

bp_user:
#endif /* __amd64 */

	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

	SET_SIZE(brktrap)

	/*
	 * #OF
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

	/*
	 * #BR
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)

#if defined(__amd64)

	ENTRY_NP(invoptrap)

	XPV_TRAP_POP

	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

#if defined(__xpv)
	movb	$0, 12(%rsp)		/* clear saved upcall_mask from %cs */
#endif
	push	$0			/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd			/* a dummy trap number */
	INTR_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$8, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	IRET
	/*NOTREACHED*/

ud_ret:
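	/*
	 * We must emulate a "ret":  load the return address from the
	 * interrupted thread's stack (via the saved %rsp), store it as
	 * the trapping %rip, and pop it by advancing the saved %rsp.
	 */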
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	NPTRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)

#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl	%gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f

	addl	$4, %esp
#if defined(__xpv)
	movb	$0, 6(%esp)		/* clear saved upcall_mask from %cs */
#endif	/* __xpv */
	pusha
	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f
1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	jmp	_emul_done
2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above:  we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	jmp	_emul_done
3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	jmp	_emul_done
4:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
_emul_done:
	IRET				/* return from interrupt */
7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */

#if defined(__amd64)

	/*
	 * #NM
	 */
#if defined(__xpv)

	ENTRY_NP(ndptrap)
	/*
	 * (On the hypervisor we must make a hypercall so we might as well
	 * save everything and handle as in a normal trap.)
	 */
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	INTR_PUSH

	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	LOADCPU(%rbx)			/* swapgs handled in hypervisor */
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rbx), %r15	/* %r15 = curthread */
	movl	$FPU_EN, %ebx
	movq	T_LWP(%r15), %r15	/* %r15 = lwp */
	testq	%r15, %r15
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %r15	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%r15)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%r15)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %r15
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	FXRSTORQ	((%r15))
	cmpw	$KCS_SEL, REGOFF_CS(%rsp)
	je	.return_to_kernel

	ASSERT_UPCALL_MASK_IS_SET
	USER_POP
	IRET				/* return to user mode */
	/*NOTREACHED*/

.return_to_kernel:
	INTR_POP
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	INTR_POP
	pushq	$0			/* can not use TRAP_NOERR */
	pushq	$T_NOEXTFLT
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#else	/* __xpv */

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	pushq	%rax
	pushq	%rbx
	cmpw	$KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne	1f
	LOADCPU(%rbx)			/* if yes, don't swapgs */
	jmp	2f
1:
	SWAPGS				/* if from user, need swapgs */
	LOADCPU(%rbx)
	SWAPGS
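	/*
	 * We only needed the kernel's %gs base long enough to load the
	 * cpu_t pointer; swap the user's %gs base back in immediately,
	 * since the fast path below iret's back without taking the
	 * normal trap return path.
	 */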
2:
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rbx), %rax	/* %rax = curthread */
	movl	$FPU_EN, %ebx
	movq	T_LWP(%rax), %rax	/* %rax = lwp */
	testq	%rax, %rax
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %rax	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%ebx, PCB_FPU_FLAGS(%rax)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rax)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rax
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	FXRSTORQ	((%rax))
	popq	%rbx
	popq	%rax
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __xpv */

#elif defined(__i386)

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in fpnoextflt
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx
	movw	%bx, %ds
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%eax)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
	movl	$FPU_EN, %eax
	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
	testl	%ebx, %ebx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %ebx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%ebx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %ebx
#endif
	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl	_patch_fxrstor_ebx
_patch_fxrstor_ebx:
	frstor	(%ebx)		/* may be patched to fxrstor */
	nop			/* (including this byte) */
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	IRET

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%ebx
	popl	%eax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __i386 */

#if !defined(__xpv)
#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT
	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)

#elif defined(__i386)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */

	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception. Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)

#endif	/* __i386 */
#endif	/* !__xpv */

	ENTRY_NP(overrun)
	push	$0
	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
	jmp	cmninttrap
	SET_SIZE(overrun)

	/*
	 * #TS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

	/*
	 * #NP
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

	/*
	 * #SS
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(stktrap)

	/*
	 * #GP
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

	/*
	 * #PF
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	INTR_PUSH
#if defined(__xpv)

#if defined(__amd64)
	movq	%gs:CPU_VCPU_INFO, %r15
	movq	VCPU_INFO_ARCH_CR2(%r15), %r15	/* vcpu[].arch.cr2 */
#elif defined(__i386)
	movl	%gs:CPU_VCPU_INFO, %esi
	movl	VCPU_INFO_ARCH_CR2(%esi), %esi	/* vcpu[].arch.cr2 */
#endif	/* __i386 */

#else	/* __xpv */

#if defined(__amd64)
	movq	%cr2, %r15
#elif defined(__i386)
	movl	%cr2, %esi
#endif	/* __i386 */

#endif	/* __xpv */
	jmp	cmntrap_pushed
	SET_SIZE(pftrap)

#if !defined(__amd64)

	.globl	idt0_default_r

	/*
	 * #PF pentium bug workaround
	 */
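	/*
	 * The workaround for the Pentium "f00f" erratum arranges for the
	 * IDT to be mapped such that the erratum takes a page fault rather
	 * than wedging the processor.  If the page-aligned %cr2 matches
	 * the IDT base recorded in idt0_default_r and the fault did not
	 * come from user mode, the fault is really an invalid opcode trap
	 * and is redispatched to invoptrap below.
	 */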
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)

#endif	/* !__amd64 */

	ENTRY_NP(resvtrap)
	TRAP_NOERR(15)		/* (reserved)  */
	jmp	cmntrap
	SET_SIZE(resvtrap)

	/*
	 * #MF
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

	/*
	 * #AC
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

	/*
	 * #MC
	 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	SET_CPU_GSBASE

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif

	/*
	 * #XF
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmninttrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(30)		/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)

	ENTRY_NP(invalint)
	TRAP_NOERR(31)		/* even more so */
	jmp	cmnint
	SET_SIZE(invalint)

	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	XPV_TRAP_POP

	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

#if defined(__xpv)
	pushq	%r11
	pushq	%rcx
#endif
	jmp	gptrap
	SET_SIZE(fasttrap)

#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	XPV_TRAP_POP
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	IRET
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */

	/*
	 * Interrupts start at 32
	 */
#define MKIVCT(n)			\
	ENTRY_NP(ivct/**/n)		\
	push	$0;			\
	push	$n - 0x20;		\
	jmp	cmnint;			\
	SET_SIZE(ivct/**/n)
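
	/*
	 * Each ivct/**/n entry pushes a dummy error code and its vector
	 * number biased by 0x20 (the first interrupt vector), then joins
	 * the common interrupt handling code at cmnint.
	 */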

	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)

#endif	/* __lint */