xref: /titanic_52/usr/src/uts/intel/ia32/ml/exception.s (revision 2b24ab6b3865caeede9eeb9db6b83e1d89dcd1ea)
1/*
2 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3 * Use is subject to license terms.
4 */
5
6/*
7 * Copyright (c) 1989, 1990 William F. Jolitz.
8 * Copyright (c) 1990 The Regents of the University of California.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the University of
22 *	California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 *    may be used to endorse or promote products derived from this software
25 *    without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
40 */
41
42#include <sys/asm_linkage.h>
43#include <sys/asm_misc.h>
44#include <sys/trap.h>
45#include <sys/psw.h>
46#include <sys/regset.h>
47#include <sys/privregs.h>
48#include <sys/dtrace.h>
49#include <sys/x86_archext.h>
50#include <sys/traptrace.h>
51#include <sys/machparam.h>
52
53/*
54 * only one routine in this file is interesting to lint
55 */
56
57#if defined(__lint)
58
/*
 * Lint stub only: ndptrap_frstor is the single symbol in this file that
 * lint sees; the real definition is the assembly ALTENTRY further below.
 */
59void
60ndptrap_frstor(void)
61{}
62
63#else
64
65#include "assym.h"
66
67/*
68 * push $0 on stack for traps that do not
69 * generate an error code. This is so the rest
70 * of the kernel can expect a consistent stack
71 * from any exception.
72 *
73 * Note that for all exceptions for amd64
74 * %r11 and %rcx are on the stack. Just pop
75 * them back into their appropriate registers and let
76 * it get saved as is running native.
77 */
78
79#if defined(__xpv) && defined(__amd64)
80
/*
 * NPTRAP_NOERR: push a zero error code plus the trap number WITHOUT
 * first doing XPV_TRAP_POP; used where the hypervisor-saved %rcx/%r11
 * have already been popped by the caller (see brktrap/ud_trap).
 */
81#define	NPTRAP_NOERR(trapno)	\
82	pushq	$0;		\
83	pushq	$trapno
84
/* Pop the hypervisor-saved %rcx/%r11 first, then build the frame. */
85#define	TRAP_NOERR(trapno)	\
86	XPV_TRAP_POP;		\
87	NPTRAP_NOERR(trapno)
88
89/*
90 * error code already pushed by hw
91 * onto stack.
92 */
93#define	TRAP_ERR(trapno)	\
94	XPV_TRAP_POP;		\
95	pushq	$trapno
96
97#else /* __xpv && __amd64 */
98
/* Bare metal (or i386): no hypervisor-saved registers to pop first. */
99#define	TRAP_NOERR(trapno)	\
100	push	$0;		\
101	push	$trapno
102
103#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
104
105/*
106 * error code already pushed by hw
107 * onto stack.
108 */
109#define	TRAP_ERR(trapno)	\
110	push	$trapno
111
112#endif	/* __xpv && __amd64 */
113
114
115	/*
116	 * #DE
117	 */
118	ENTRY_NP(div0trap)
	/* #DE pushes no error code; fake one and take the common trap path */
119	TRAP_NOERR(T_ZERODIV)	/* $0 */
120	jmp	cmntrap
121	SET_SIZE(div0trap)
122
123	/*
124	 * #DB
125	 *
126	 * Fetch %dr6 and clear it, handing off the value to the
127	 * cmntrap code in %r15/%esi
128	 */
129	ENTRY_NP(dbgtrap)
130	TRAP_NOERR(T_SGLSTP)	/* $1 */
131
132#if defined(__amd64)
133#if !defined(__xpv)		/* no sysenter support yet */
134	/*
135	 * If we get here as a result of single-stepping a sysenter
136	 * instruction, we suddenly find ourselves taking a #db
137	 * in kernel mode -before- we've swapgs'ed.  So before we can
138	 * take the trap, we do the swapgs here, and fix the return
139	 * %rip in trap() so that we return immediately after the
140	 * swapgs in the sysenter handler to avoid doing the swapgs again.
141	 *
142	 * Nobody said that the design of sysenter was particularly
143	 * elegant, did they?
144	 */
145
146	pushq	%r11
147
148	/*
149	 * At this point the stack looks like this:
150	 *
151	 * (high address) 	r_ss
152	 *			r_rsp
153	 *			r_rfl
154	 *			r_cs
155	 *			r_rip		<-- %rsp + 24
156	 *			r_err		<-- %rsp + 16
157	 *			r_trapno	<-- %rsp + 8
158	 * (low address)	%r11		<-- %rsp
159	 */
160	leaq	sys_sysenter(%rip), %r11
161	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
162	je	1f
163	leaq	brand_sys_sysenter(%rip), %r11
164	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
165	jne	2f
1661:	SWAPGS
1672:	popq	%r11
168#endif	/* !__xpv */
169
170	INTR_PUSH
	/* under Xen, %db6 must be read/cleared via the kdi_dreg shims */
171#if defined(__xpv)
172	movl	$6, %edi
173	call	kdi_dreg_get
174	movq	%rax, %r15		/* %db6 -> %r15 */
175	movl	$6, %edi
176	movl	$0, %esi
177	call	kdi_dreg_set		/* 0 -> %db6 */
178#else
179	movq	%db6, %r15
180	xorl	%eax, %eax
181	movq	%rax, %db6
182#endif
183
184#elif defined(__i386)
185
186	INTR_PUSH
187#if defined(__xpv)
188	pushl	$6
189	call	kdi_dreg_get
190	addl	$4, %esp
191	movl	%eax, %esi		/* %dr6 -> %esi */
192	pushl	$0
193	pushl	$6
194	call	kdi_dreg_set		/* 0 -> %dr6 */
195	addl	$8, %esp
196#else
197	movl	%db6, %esi
198	xorl	%eax, %eax
199	movl	%eax, %db6
200#endif
201#endif	/* __i386 */
202
	/* regs already saved by INTR_PUSH; %dr6 value rides in %r15/%esi */
203	jmp	cmntrap_pushed
204	SET_SIZE(dbgtrap)
205
206#if defined(__amd64)
207#if !defined(__xpv)
208
209/*
210 * Macro to set the gsbase or kgsbase to the address of the struct cpu
211 * for this processor.  If we came from userland, set kgsbase else
212 * set gsbase.  We find the proper cpu struct by looping through
213 * the cpu structs for all processors till we find a match for the gdt
214 * of the trapping processor.  The stack is expected to be pointing at
215 * the standard regs pushed by hardware on a trap (plus error code and trapno).
216 */
/*
 * Scratch registers %rax/%rbx/%rcx/%rdx/%rbp are preserved via the
 * temporary reg-save area carved out below %rsp; flags are clobbered.
 * The mfence before the GSBASE wrmsr works around OPTERON_ERRATUM_88.
 */
217#define	SET_CPU_GSBASE							\
218	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
219	movq	%rax, REGOFF_RAX(%rsp);					\
220	movq	%rbx, REGOFF_RBX(%rsp);					\
221	movq	%rcx, REGOFF_RCX(%rsp);					\
222	movq	%rdx, REGOFF_RDX(%rsp);					\
223	movq	%rbp, REGOFF_RBP(%rsp);					\
224	movq	%rsp, %rbp;						\
225	subq	$16, %rsp;		/* space for gdt */		\
226	sgdt	6(%rsp);						\
227	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
228	xorl	%ebx, %ebx;		/* loop index */		\
229	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
2301:									\
231	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
232	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
233	je	2f;			/* yes, continue */		\
234	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
235	je	3f;			/* yes, go set gsbase */	\
2362:									\
237	incl	%ebx;			/* i++ */			\
238	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
239	jb	1b;			/* yes, loop */			\
240/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
2413:									\
242	movl	$MSR_AMD_KGSBASE, %ecx;					\
243	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
244	jne	4f;			/* no, go set KGSBASE */	\
245	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
246        mfence;				/* OPTERON_ERRATUM_88 */	\
2474:									\
248	movq	%rax, %rdx;		/* write base register */	\
249	shrq	$32, %rdx;						\
250	wrmsr;								\
251	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
252	movq	REGOFF_RCX(%rbp), %rcx;					\
253	movq	REGOFF_RBX(%rbp), %rbx;					\
254	movq	REGOFF_RAX(%rbp), %rax;					\
255	movq	%rbp, %rsp;						\
256	movq	REGOFF_RBP(%rsp), %rbp;					\
257	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */
258
259#else	/* __xpv */
260
/* The hypervisor manages gsbase for us. */
261#define	SET_CPU_GSBASE	/* noop on the hypervisor */
262
263#endif	/* __xpv */
264#endif	/* __amd64 */
265
266
267#if defined(__amd64)
268
269	/*
270	 * #NMI
271	 *
272	 * XXPV: See 6532669.
273	 */
274	ENTRY_NP(nmiint)
275	TRAP_NOERR(T_NMIFLT)	/* $2 */
276
	/* NMI may arrive before swapgs; re-derive %gs base from the GDT */
277	SET_CPU_GSBASE
278
279	/*
280	 * Save all registers and setup segment registers
281	 * with kernel selectors.
282	 */
283	INTR_PUSH
284	INTGATE_INIT_KERNEL_FLAGS
285
286	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
287	TRACE_REGS(%r12, %rsp, %rax, %rbx)
288	TRACE_STAMP(%r12)
289
290	movq	%rsp, %rbp
291
	/* arg0 = struct regs * */
292	movq	%rbp, %rdi
293	call	av_dispatch_nmivect
294
295	INTR_POP
296	IRET
297	/*NOTREACHED*/
298	SET_SIZE(nmiint)
299
300#elif defined(__i386)
301
302	/*
303	 * #NMI
304	 */
305	ENTRY_NP(nmiint)
306	TRAP_NOERR(T_NMIFLT)	/* $2 */
307
308	/*
309	 * Save all registers and setup segment registers
310	 * with kernel selectors.
311	 */
312	INTR_PUSH
313	INTGATE_INIT_KERNEL_FLAGS
314
315	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
316	TRACE_REGS(%edi, %esp, %ebx, %ecx)
317	TRACE_STAMP(%edi)
318
319	movl	%esp, %ebp
320
	/* arg0 = struct regs * (pushed on stack for the i386 ABI) */
321	pushl	%ebp
322	call	av_dispatch_nmivect
323	addl	$4, %esp
324
325	INTR_POP_USER
326	IRET
327	SET_SIZE(nmiint)
328
329#endif	/* __i386 */
330
331	/*
332	 * #BP
333	 */
334	ENTRY_NP(brktrap)
335
336#if defined(__amd64)
337	XPV_TRAP_POP
	/* saved %cs at 8(%rsp): did the breakpoint fire in the kernel? */
338	cmpw	$KCS_SEL, 8(%rsp)
339	jne	bp_user
340
341	/*
342	 * This is a breakpoint in the kernel -- it is very likely that this
343	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
344	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
345	 * we must decrement the trapping %rip to make it appear as a fault.
346	 * We then push a non-zero error code to indicate that this is coming
347	 * from #BP.
348	 */
349	decq	(%rsp)
350	push	$1			/* error code -- non-zero for #BP */
351	jmp	ud_kernel
352
353bp_user:
354#endif /* __amd64 */
355
	/* user-mode (or i386) breakpoint: hand off as T_BPTFLT */
356	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
357	jmp	dtrace_trap
358
359	SET_SIZE(brktrap)
360
361	/*
362	 * #OF
363	 */
364	ENTRY_NP(ovflotrap)
	/* #OF (INTO overflow) pushes no error code */
365	TRAP_NOERR(T_OVFLW)	/* $4 */
366	jmp	cmntrap
367	SET_SIZE(ovflotrap)
368
369	/*
370	 * #BR
371	 */
372	ENTRY_NP(boundstrap)
	/* #BR (BOUND range exceeded) pushes no error code */
373	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
374	jmp	cmntrap
375	SET_SIZE(boundstrap)
376
377#if defined(__amd64)

/*
 * #UD (amd64).  Kernel-mode #UD is assumed to be a DTrace FBT probe point:
 * dtrace_invop() is asked to emulate the patched-over instruction
 * (push %rbp / leave / nop / ret); anything it declines falls through to
 * a normal #UD delivered via cmntrap.  ud_kernel is also entered from
 * brktrap, which spoofs kernel #BP as #UD with a non-zero error code.
 */
378
379	ENTRY_NP(invoptrap)
380
381	XPV_TRAP_POP
382
	/* saved %cs at 8(%rsp): kernel or user fault? */
383	cmpw	$KCS_SEL, 8(%rsp)
384	jne	ud_user

385
386#if defined(__xpv)
387	movb	$0, 12(%rsp)		/* clear saved upcall_mask from %cs */
388#endif
389	push	$0			/* error code -- zero for #UD */
390ud_kernel:
391	push	$0xdddd			/* a dummy trap number */
392	INTR_PUSH
	/* dtrace_invop(rip, &stack-copy, rax) -- see dtrace_invop callers */
393	movq	REGOFF_RIP(%rsp), %rdi
394	movq	REGOFF_RSP(%rsp), %rsi
395	movq	REGOFF_RAX(%rsp), %rdx
396	pushq	(%rsi)
397	movq	%rsp, %rsi
398	call	dtrace_invop
399	ALTENTRY(dtrace_invop_callsite)
400	addq	$8, %rsp
401	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
402	je	ud_push
403	cmpl	$DTRACE_INVOP_LEAVE, %eax
404	je	ud_leave
405	cmpl	$DTRACE_INVOP_NOP, %eax
406	je	ud_nop
407	cmpl	$DTRACE_INVOP_RET, %eax
408	je	ud_ret
409	jmp	ud_trap
410
411ud_push:
412	/*
413	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
414	 * down 8 bytes, and then store the base pointer.
415	 */
416	INTR_POP
417	subq	$16, %rsp		/* make room for %rbp */
418	pushq	%rax			/* push temp */
419	movq	24(%rsp), %rax		/* load calling RIP */
420	addq	$1, %rax		/* increment over trapping instr */
421	movq	%rax, 8(%rsp)		/* store calling RIP */
422	movq	32(%rsp), %rax		/* load calling CS */
423	movq	%rax, 16(%rsp)		/* store calling CS */
424	movq	40(%rsp), %rax		/* load calling RFLAGS */
425	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
426	movq	48(%rsp), %rax		/* load calling RSP */
427	subq	$8, %rax		/* make room for %rbp */
428	movq	%rax, 32(%rsp)		/* store calling RSP */
429	movq	56(%rsp), %rax		/* load calling SS */
430	movq	%rax, 40(%rsp)		/* store calling SS */
431	movq	32(%rsp), %rax		/* reload calling RSP */
432	movq	%rbp, (%rax)		/* store %rbp there */
433	popq	%rax			/* pop off temp */
434	IRET				/* return from interrupt */
435	/*NOTREACHED*/
436
437ud_leave:
438	/*
439	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
440	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
441	 * than it is on i386 -- we can exploit the fact that the %rsp is
442	 * explicitly saved to effect the pop without having to reshuffle
443	 * the other data pushed for the trap.
444	 */
445	INTR_POP
446	pushq	%rax			/* push temp */
447	movq	8(%rsp), %rax		/* load calling RIP */
448	addq	$1, %rax		/* increment over trapping instr */
449	movq	%rax, 8(%rsp)		/* store calling RIP */
450	movq	(%rbp), %rax		/* get new %rbp */
451	addq	$8, %rbp		/* adjust new %rsp */
452	movq	%rbp, 32(%rsp)		/* store new %rsp */
453	movq	%rax, %rbp		/* set new %rbp */
454	popq	%rax			/* pop off temp */
455	IRET				/* return from interrupt */
456	/*NOTREACHED*/
457
458ud_nop:
459	/*
460	 * We must emulate a "nop".  This is obviously not hard:  we need only
461	 * advance the %rip by one.
462	 */
463	INTR_POP
464	incq	(%rsp)
465	IRET
466	/*NOTREACHED*/
467
468ud_ret:
469	INTR_POP
470	pushq	%rax			/* push temp */
471	movq	32(%rsp), %rax		/* load %rsp */
472	movq	(%rax), %rax		/* load calling RIP */
473	movq	%rax, 8(%rsp)		/* store calling RIP */
474	addq	$8, 32(%rsp)		/* adjust new %rsp */
475	popq	%rax			/* pop off temp */
476	IRET				/* return from interrupt */
477	/*NOTREACHED*/
478
479ud_trap:
480	/*
481	 * We're going to let the kernel handle this as a normal #UD.  If,
482	 * however, we came through #BP and are spoofing #UD (in this case,
483	 * the stored error value will be non-zero), we need to de-spoof
484	 * the trap by incrementing %rip and pushing T_BPTFLT.
485	 */
486	cmpq	$0, REGOFF_ERR(%rsp)
487	je	ud_ud
488	incq	REGOFF_RIP(%rsp)
489	addq	$REGOFF_RIP, %rsp
490	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
491	jmp	cmntrap
492
493ud_ud:
494	addq	$REGOFF_RIP, %rsp
495ud_user:
496	NPTRAP_NOERR(T_ILLINST)
497	jmp	cmntrap
498	SET_SIZE(invoptrap)
499
500#elif defined(__i386)

/*
 * #UD (i386).  As on amd64, kernel-mode #UD is treated as a likely DTrace
 * FBT probe; dtrace_invop() is asked to emulate pushl %ebp / popl %ebp /
 * leave / nop in-place, otherwise the trap is delivered as T_ILLINST.
 */
501
502	/*
503	 * #UD
504	 */
505	ENTRY_NP(invoptrap)
506	/*
507	 * If we are taking an invalid opcode trap while in the kernel, this
508	 * is likely an FBT probe point.
509	 */
510	pushl   %gs
	/* kernel %gs selector on entry implies a kernel-mode fault */
511	cmpw	$KGS_SEL, (%esp)
512	jne	8f

513
514	addl	$4, %esp
515#if defined(__xpv)
516	movb	$0, 6(%esp)		/* clear saved upcall_mask from %cs */
517#endif	/* __xpv */
518	pusha
519	pushl	%eax			/* push %eax -- may be return value */
520	pushl	%esp			/* push stack pointer */
521	addl	$48, (%esp)		/* adjust to incoming args */
522	pushl	40(%esp)		/* push calling EIP */
523	call	dtrace_invop
524	ALTENTRY(dtrace_invop_callsite)
525	addl	$12, %esp
526	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
527	je	1f
528	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
529	je	2f
530	cmpl	$DTRACE_INVOP_LEAVE, %eax
531	je	3f
532	cmpl	$DTRACE_INVOP_NOP, %eax
533	je	4f
534	jmp	7f
5351:
536	/*
537	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
538	 * down 4 bytes, and then store the base pointer.
539	 */
540	popa
541	subl	$4, %esp		/* make room for %ebp */
542	pushl	%eax			/* push temp */
543	movl	8(%esp), %eax		/* load calling EIP */
544	incl	%eax			/* increment over LOCK prefix */
545	movl	%eax, 4(%esp)		/* store calling EIP */
546	movl	12(%esp), %eax		/* load calling CS */
547	movl	%eax, 8(%esp)		/* store calling CS */
548	movl	16(%esp), %eax		/* load calling EFLAGS */
549	movl	%eax, 12(%esp)		/* store calling EFLAGS */
550	movl	%ebp, 16(%esp)		/* push %ebp */
551	popl	%eax			/* pop off temp */
552	jmp	_emul_done
5532:
554	/*
555	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
556	 * the above:  we remove the %ebp from the stack, and squeeze up the
557	 * saved state from the trap.
558	 */
559	popa
560	pushl	%eax			/* push temp */
561	movl	16(%esp), %ebp		/* pop %ebp */
562	movl	12(%esp), %eax		/* load calling EFLAGS */
563	movl	%eax, 16(%esp)		/* store calling EFLAGS */
564	movl	8(%esp), %eax		/* load calling CS */
565	movl	%eax, 12(%esp)		/* store calling CS */
566	movl	4(%esp), %eax		/* load calling EIP */
567	incl	%eax			/* increment over LOCK prefix */
568	movl	%eax, 8(%esp)		/* store calling EIP */
569	popl	%eax			/* pop off temp */
570	addl	$4, %esp		/* adjust stack pointer */
571	jmp	_emul_done
5723:
573	/*
574	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
575	 * followed by a "popl %ebp".  This looks similar to the above, but
576	 * requires two temporaries:  one for the new base pointer, and one
577	 * for the staging register.
578	 */
579	popa
580	pushl	%eax			/* push temp */
581	pushl	%ebx			/* push temp */
582	movl	%ebp, %ebx		/* set temp to old %ebp */
583	movl	(%ebx), %ebp		/* pop %ebp */
584	movl	16(%esp), %eax		/* load calling EFLAGS */
585	movl	%eax, (%ebx)		/* store calling EFLAGS */
586	movl	12(%esp), %eax		/* load calling CS */
587	movl	%eax, -4(%ebx)		/* store calling CS */
588	movl	8(%esp), %eax		/* load calling EIP */
589	incl	%eax			/* increment over LOCK prefix */
590	movl	%eax, -8(%ebx)		/* store calling EIP */
591	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
592	popl	%ebx			/* pop off temp */
593	popl	%eax			/* pop off temp */
594	movl	-12(%esp), %esp		/* set stack pointer */
595	subl	$8, %esp		/* adjust for three pushes, one pop */
596	jmp	_emul_done
5974:
598	/*
599	 * We must emulate a "nop".  This is obviously not hard:  we need only
600	 * advance the %eip by one.
601	 */
602	popa
603	incl	(%esp)
604_emul_done:
605	IRET				/* return from interrupt */
6067:
	/* dtrace_invop declined: deliver a genuine kernel #UD */
607	popa
608	pushl	$0
609	pushl	$T_ILLINST	/* $6 */
610	jmp	cmntrap
6118:
	/* user-mode #UD: discard saved %gs and deliver T_ILLINST */
612	addl	$4, %esp
613	pushl	$0
614	pushl	$T_ILLINST	/* $6 */
615	jmp	cmntrap
616	SET_SIZE(invoptrap)
617
618#endif	/* __i386 */
619
620#if defined(__amd64)

/*
 * #NM (amd64): device-not-available after a context switch.  The fast path
 * restores the lwp's saved FP context with FXRSTORQ and irets directly;
 * any other case (no FPU, no lwp, first fault) is punted to trap() via
 * cmninttrap.
 */
621
622	/*
623	 * #NM
624	 */
625#if defined(__xpv)
626
627	ENTRY_NP(ndptrap)
628	/*
629	 * (On the hypervisor we must make a hypercall so we might as well
630	 * save everything and handle as in a normal trap.)
631	 */
632	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
633	INTR_PUSH
634
635	/*
636	 * We want to do this quickly as every lwp using fp will take this
637	 * after a context switch -- we do the frequent path in ndptrap_frstor
638	 * below; for all other cases, we let the trap code handle it
639	 */
640	LOADCPU(%rbx)			/* swapgs handled in hypervisor */
641	cmpl	$0, fpu_exists(%rip)
642	je	.handle_in_trap		/* let trap handle no fp case */
643	movq	CPU_THREAD(%rbx), %r15	/* %r15 = curthread */
644	movl	$FPU_EN, %ebx
645	movq	T_LWP(%r15), %r15	/* %r15 = lwp */
646	testq	%r15, %r15
647	jz	.handle_in_trap		/* should not happen? */
648#if LWP_PCB_FPU	!= 0
649	addq	$LWP_PCB_FPU, %r15	/* &lwp->lwp_pcb.pcb_fpu */
650#endif
651	testl	%ebx, PCB_FPU_FLAGS(%r15)
652	jz	.handle_in_trap		/* must be the first fault */
653	CLTS
654	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%r15)
655#if FPU_CTX_FPU_REGS != 0
656	addq	$FPU_CTX_FPU_REGS, %r15
657#endif
658	/*
659	 * the label below is used in trap.c to detect FP faults in
660	 * kernel due to user fault.
661	 */
662	ALTENTRY(ndptrap_frstor)
663	FXRSTORQ	((%r15))
664	cmpw	$KCS_SEL, REGOFF_CS(%rsp)
665	je	.return_to_kernel
666
667	ASSERT_UPCALL_MASK_IS_SET
668	USER_POP
669	IRET				/* return to user mode */
670	/*NOTREACHED*/
671
672.return_to_kernel:
673	INTR_POP
674	IRET
675	/*NOTREACHED*/
676
677.handle_in_trap:
678	INTR_POP
679	pushq	$0			/* can not use TRAP_NOERR */
680	pushq	$T_NOEXTFLT
681	jmp	cmninttrap
682	SET_SIZE(ndptrap_frstor)
683	SET_SIZE(ndptrap)
684
685#else	/* __xpv */
686
687	ENTRY_NP(ndptrap)
688	/*
689	 * We want to do this quickly as every lwp using fp will take this
690	 * after a context switch -- we do the frequent path in ndptrap_frstor
691	 * below; for all other cases, we let the trap code handle it
692	 */
693	pushq	%rax
694	pushq	%rbx
	/* 24(%rsp) = saved %cs (above the two pushes, trapno, err) */
695	cmpw    $KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
696	jne     1f
697	LOADCPU(%rbx)			/* if yes, don't swapgs */
698	jmp	2f
6991:
	/* bracket LOADCPU with swapgs so user gsbase is left intact */
700	SWAPGS				/* if from user, need swapgs */
701	LOADCPU(%rbx)
702	SWAPGS
7032:
704	cmpl	$0, fpu_exists(%rip)
705	je	.handle_in_trap		/* let trap handle no fp case */
706	movq	CPU_THREAD(%rbx), %rax	/* %rax = curthread */
707	movl	$FPU_EN, %ebx
708	movq	T_LWP(%rax), %rax	/* %rax = lwp */
709	testq	%rax, %rax
710	jz	.handle_in_trap		/* should not happen? */
711#if LWP_PCB_FPU	!= 0
712	addq	$LWP_PCB_FPU, %rax	/* &lwp->lwp_pcb.pcb_fpu */
713#endif
714	testl	%ebx, PCB_FPU_FLAGS(%rax)
715	jz	.handle_in_trap		/* must be the first fault */
716	clts
717	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rax)
718#if FPU_CTX_FPU_REGS != 0
719	addq	$FPU_CTX_FPU_REGS, %rax
720#endif
721	/*
722	 * the label below is used in trap.c to detect FP faults in
723	 * kernel due to user fault.
724	 */
725	ALTENTRY(ndptrap_frstor)
726	FXRSTORQ	((%rax))
727	popq	%rbx
728	popq	%rax
729	IRET
730	/*NOTREACHED*/
731
732.handle_in_trap:
733	popq	%rbx
734	popq	%rax
735	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
736	jmp	cmninttrap
737	SET_SIZE(ndptrap_frstor)
738	SET_SIZE(ndptrap)
739
740#endif	/* __xpv */
741
742#elif defined(__i386)

/*
 * #NM (i386): same fast-path idea as amd64, but %ds/%gs must be loaded
 * with kernel selectors by hand, and the restore instruction is runtime
 * patched (frstor -> fxrstor) at _patch_fxrstor_ebx on capable CPUs.
 */
743
744	ENTRY_NP(ndptrap)
745	/*
746	 * We want to do this quickly as every lwp using fp will take this
747	 * after a context switch -- we do the frequent path in fpnoextflt
748	 * below; for all other cases, we let the trap code handle it
749	 */
750	pushl	%eax
751	pushl	%ebx
752	pushl	%ds
753	pushl	%gs
754	movl	$KDS_SEL, %ebx
755	movw	%bx, %ds
756	movl	$KGS_SEL, %eax
757	movw	%ax, %gs
758	LOADCPU(%eax)
759	cmpl	$0, fpu_exists
760	je	.handle_in_trap		/* let trap handle no fp case */
761	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
762	movl	$FPU_EN, %eax
763	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
764	testl	%ebx, %ebx
765	jz	.handle_in_trap		/* should not happen? */
766#if LWP_PCB_FPU != 0
767	addl	$LWP_PCB_FPU, %ebx 	/* &lwp->lwp_pcb.pcb_fpu */
768#endif
769	testl	%eax, PCB_FPU_FLAGS(%ebx)
770	jz	.handle_in_trap		/* must be the first fault */
771	CLTS
772	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
773#if FPU_CTX_FPU_REGS != 0
774	addl	$FPU_CTX_FPU_REGS, %ebx
775#endif
776	/*
777	 * the label below is used in trap.c to detect FP faults in kernel
778	 * due to user fault.
779	 */
780	ALTENTRY(ndptrap_frstor)
781	.globl	_patch_fxrstor_ebx
782_patch_fxrstor_ebx:
783	frstor	(%ebx)		/* may be patched to fxrstor */
784	nop			/* (including this byte) */
785	popl	%gs
786	popl	%ds
787	popl	%ebx
788	popl	%eax
789	IRET
790
791.handle_in_trap:
792	popl	%gs
793	popl	%ds
794	popl	%ebx
795	popl	%eax
796	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
797	jmp	cmninttrap
798	SET_SIZE(ndptrap_frstor)
799	SET_SIZE(ndptrap)
800
801#endif	/* __i386 */
802
803#if !defined(__xpv)
804#if defined(__amd64)

805
806	/*
807	 * #DF
808	 */
809	ENTRY_NP(syserrtrap)
810	pushq	$T_DBLFLT
	/* gsbase may be arbitrary after a double fault; rebuild it */
811	SET_CPU_GSBASE
812
813	/*
814	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
815	 * may have reached this point after encountering a #df in kmdb.  If
816	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
817	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
818	 * here from kmdb, kmdb is probably in a very sickly state, and
819	 * shouldn't be entered from the panic flow.  We'll suppress that
820	 * entry by setting nopanicdebug.
821	 */
822	pushq	%rax
823	subq	$DESCTBR_SIZE, %rsp
824	sidt	(%rsp)
825	movq	%gs:CPU_IDT, %rax
826	cmpq	%rax, DTR_BASE(%rsp)
827	je	1f
828
829	movq	%rax, DTR_BASE(%rsp)
830	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
831	lidt	(%rsp)
832
833	movl	$1, nopanicdebug
834
8351:	addq	$DESCTBR_SIZE, %rsp
836	popq	%rax
837
838	DFTRAP_PUSH
839
840	/*
841	 * freeze trap trace.
842	 */
843#ifdef TRAPTRACE
844	leaq	trap_trace_freeze(%rip), %r11
845	incl	(%r11)
846#endif
847
848	ENABLE_INTR_FLAGS
849
	/* trap(&regs, NULL, 0) -- hand the double fault to C code */
850	movq	%rsp, %rdi	/* &regs */
851	xorl	%esi, %esi	/* clear address */
852	xorl	%edx, %edx	/* cpuid = 0 */
853	call	trap
854
855	SET_SIZE(syserrtrap)
856
857#elif defined(__i386)

/*
 * #DF (i386): delivered via a separate task gate (dftss).  The previous
 * task's state is recovered from the kernel TSS, a trap frame is built
 * by hand, and control joins cmntrap as if a normal trap had occurred.
 */
858
859	/*
860	 * #DF
861	 */
862	ENTRY_NP(syserrtrap)
863	cli				/* disable interrupts */
864
865	/*
866	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
867	 * may have reached this point after encountering a #df in kmdb.  If
868	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
869	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
870	 * here from kmdb, kmdb is probably in a very sickly state, and
871	 * shouldn't be entered from the panic flow.  We'll suppress that
872	 * entry by setting nopanicdebug.
873	 */
874
875	subl	$DESCTBR_SIZE, %esp
876	movl	%gs:CPU_IDT, %eax
877	sidt	(%esp)
878	cmpl	DTR_BASE(%esp), %eax
879	je	1f
880
881	movl	%eax, DTR_BASE(%esp)
882	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
883	lidt	(%esp)
884
885	movl	$1, nopanicdebug
886
8871:	addl	$DESCTBR_SIZE, %esp
888
889	/*
890	 * Check the CPL in the TSS to see what mode
891	 * (user or kernel) we took the fault in.  At this
892	 * point we are running in the context of the double
893	 * fault task (dftss) but the CPU's task points to
894	 * the previous task (ktss) where the process context
895	 * has been saved as the result of the task switch.
896	 */
897	movl	%gs:CPU_TSS, %eax	/* get the TSS */
898	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
899	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
900	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
901	jz	make_frame
902	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
903	movl	TSS_ESP0(%eax), %esp
904
905	/*
906	 * Clear the NT flag to avoid a task switch when the process
907	 * finally pops the EFL off the stack via an iret.  Clear
908	 * the TF flag since that is what the processor does for
909	 * a normal exception. Clear the IE flag so that interrupts
910	 * remain disabled.
911	 */
912	movl	TSS_EFL(%eax), %ecx
913	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
914	pushl	%ecx
915	popfl				/* restore the EFL */
916	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
917	lldt	%cx
918
919	/*
920	 * Restore process segment selectors.
921	 */
922	movw	TSS_DS(%eax), %ds
923	movw	TSS_ES(%eax), %es
924	movw	TSS_FS(%eax), %fs
925	movw	TSS_GS(%eax), %gs
926
927	/*
928	 * Restore task segment selectors.
929	 */
930	movl	$KDS_SEL, TSS_DS(%eax)
931	movl	$KDS_SEL, TSS_ES(%eax)
932	movl	$KDS_SEL, TSS_SS(%eax)
933	movl	$KFS_SEL, TSS_FS(%eax)
934	movl	$KGS_SEL, TSS_GS(%eax)
935
936	/*
937	 * Clear the TS bit, the busy bits in both task
938	 * descriptors, and switch tasks.
939	 */
940	clts
941	leal	gdt0, %ecx
942	movl	DFTSS_SEL+4(%ecx), %esi
943	andl	$_BITNOT(0x200), %esi
944	movl	%esi, DFTSS_SEL+4(%ecx)
945	movl	KTSS_SEL+4(%ecx), %esi
946	andl	$_BITNOT(0x200), %esi
947	movl	%esi, KTSS_SEL+4(%ecx)
948	movw	$KTSS_SEL, %cx
949	ltr	%cx
950
951	/*
952	 * Restore part of the process registers.
953	 */
954	movl	TSS_EBP(%eax), %ebp
955	movl	TSS_ECX(%eax), %ecx
956	movl	TSS_ESI(%eax), %esi
957	movl	TSS_EDI(%eax), %edi
958
959make_frame:
960	/*
961	 * Make a trap frame.  Leave the error code (0) on
962	 * the stack since the first word on a trap stack is
963	 * unused anyway.
964	 */
965	pushl	%ebx			/ fault SS
966	pushl	%edx			/ fault ESP
967	pushl	TSS_EFL(%eax)		/ fault EFL
968	pushl	TSS_CS(%eax)		/ fault CS
969	pushl	TSS_EIP(%eax)		/ fault EIP
970	pushl	$0			/ error code
971	pushl	$T_DBLFLT		/ trap number 8
972	movl	TSS_EBX(%eax), %ebx	/ restore EBX
973	movl	TSS_EDX(%eax), %edx	/ restore EDX
974	movl	TSS_EAX(%eax), %eax	/ restore EAX
975	sti				/ enable interrupts
976	jmp	cmntrap
977	SET_SIZE(syserrtrap)
978
979#endif	/* __i386 */
980#endif	/* !__xpv */
981
982	ENTRY_NP(overrun)
	/*
	 * NOTE(review): the extra "push $0" ahead of TRAP_NOERR leaves three
	 * words pushed rather than two; vector 9 is never generated on these
	 * CPUs (per the comment below), so this path appears to be dead --
	 * confirm before relying on its frame layout.
	 */
983	push	$0
984	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
985	jmp	cmninttrap
986	SET_SIZE(overrun)
987
988	/*
989	 * #TS
990	 */
991	ENTRY_NP(invtsstrap)
	/* #TS: hardware supplies the error code (TSS selector) */
992	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
993	jmp	cmntrap
994	SET_SIZE(invtsstrap)
995
996	/*
997	 * #NP
998	 */
999	ENTRY_NP(segnptrap)
	/* #NP: hardware supplies the error code (selector) */
1000	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
1001#if defined(__amd64)
	/* may arrive pre-swapgs; re-derive the kernel %gs base */
1002	SET_CPU_GSBASE
1003#endif
1004	jmp	cmntrap
1005	SET_SIZE(segnptrap)
1006
1007	/*
1008	 * #SS
1009	 */
1010	ENTRY_NP(stktrap)
	/* #SS: hardware supplies the error code */
1011	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
1012	jmp	cmntrap
1013	SET_SIZE(stktrap)
1014
1015	/*
1016	 * #GP
1017	 */
1018	ENTRY_NP(gptrap)
	/* #GP: hardware supplies the error code */
1019	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
1020#if defined(__amd64)
	/* may arrive pre-swapgs; re-derive the kernel %gs base */
1021	SET_CPU_GSBASE
1022#endif
1023	jmp	cmntrap
1024	SET_SIZE(gptrap)
1025
1026	/*
1027	 * #PF
1028	 */
1029	ENTRY_NP(pftrap)
1030	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
1031	INTR_PUSH
	/*
	 * Hand the faulting address to cmntrap_pushed in %r15 (amd64) or
	 * %esi (i386): from vcpu_info under Xen, from %cr2 on bare metal.
	 */
1032#if defined(__xpv)
1033
1034#if defined(__amd64)
1035	movq	%gs:CPU_VCPU_INFO, %r15
1036	movq	VCPU_INFO_ARCH_CR2(%r15), %r15	/* vcpu[].arch.cr2 */
1037#elif defined(__i386)
1038	movl	%gs:CPU_VCPU_INFO, %esi
1039	movl	VCPU_INFO_ARCH_CR2(%esi), %esi	/* vcpu[].arch.cr2 */
1040#endif	/* __i386 */
1041
1042#else	/* __xpv */
1043
1044#if defined(__amd64)
1045	movq	%cr2, %r15
1046#elif defined(__i386)
1047	movl	%cr2, %esi
1048#endif	/* __i386 */
1049
1050#endif	/* __xpv */
1051	jmp	cmntrap_pushed
1052	SET_SIZE(pftrap)
1053
1054#if !defined(__amd64)

1055
1056	.globl	idt0_default_r
1057
1058	/*
1059	 * #PF pentium bug workaround
	 *
	 * Distinguishes a real #PF from a Pentium-errata-induced fault on
	 * the IDT page itself: if %cr2 (page-masked) matches the default
	 * IDT handler address recorded in idt0_default_r and the fault did
	 * not come from user mode, it is treated as an invalid opcode.
1060	 */
1061	ENTRY_NP(pentium_pftrap)
1062	pushl	%eax
1063	movl	%cr2, %eax
1064	andl	$MMU_STD_PAGEMASK, %eax
1065
1066	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */
1067
1068	je	check_for_user_address
1069user_mode:
1070	popl	%eax
1071	pushl	$T_PGFLT	/* $14 */
1072	jmp	cmntrap
1073check_for_user_address:
1074	/*
1075	 * Before we assume that we have an unmapped trap on our hands,
1076	 * check to see if this is a fault from user mode.  If it is,
1077	 * we'll kick back into the page fault handler.
1078	 */
1079	movl	4(%esp), %eax	/* error code */
1080	andl	$PF_ERR_USER, %eax
1081	jnz	user_mode
1082
1083	/*
1084	 * We now know that this is the invalid opcode trap.
1085	 */
1086	popl	%eax
1087	addl	$4, %esp	/* pop error code */
1088	jmp	invoptrap
1089	SET_SIZE(pentium_pftrap)
1090
1091#endif	/* !__amd64 */
1092
1093	ENTRY_NP(resvtrap)
	/* vector 15 is architecturally reserved; should never fire */
1094	TRAP_NOERR(15)		/* (reserved)  */
1095	jmp	cmntrap
1096	SET_SIZE(resvtrap)
1097
1098	/*
1099	 * #MF
1100	 */
1101	ENTRY_NP(ndperr)
	/* #MF (x87 floating-point error): no error code pushed by hw */
1102	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
1103	jmp	cmninttrap
1104	SET_SIZE(ndperr)
1105
1106	/*
1107	 * #AC
1108	 */
1109	ENTRY_NP(achktrap)
	/* #AC: hardware supplies the (always-zero) error code */
1110	TRAP_ERR(T_ALIGNMENT)	/* $17 */
1111	jmp	cmntrap
1112	SET_SIZE(achktrap)
1113
1114	/*
1115	 * #MC
1116	 */
1117	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */
1118
1119#if defined(__amd64)

/*
 * #MC: save state, record a trap-trace entry, and hand the saved regs
 * to cmi_mca_trap() for machine-check processing; return via _sys_rtt.
 */
1120
1121	ENTRY_NP(mcetrap)
1122	TRAP_NOERR(T_MCE)	/* $18 */
1123
	/* #MC may arrive pre-swapgs; re-derive the kernel %gs base */
1124	SET_CPU_GSBASE
1125
1126	INTR_PUSH
1127	INTGATE_INIT_KERNEL_FLAGS
1128
1129	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
1130	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
1131	TRACE_STAMP(%rdi)
1132
1133	movq	%rsp, %rbp
1134
1135	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
1136	call	cmi_mca_trap	/* cmi_mca_trap(rp); */
1137
1138	jmp	_sys_rtt
1139	SET_SIZE(mcetrap)
1140
1141#else
1142
1143	ENTRY_NP(mcetrap)
1144	TRAP_NOERR(T_MCE)	/* $18 */
1145
1146	INTR_PUSH
1147	INTGATE_INIT_KERNEL_FLAGS
1148
1149	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
1150	TRACE_REGS(%edi, %esp, %ebx, %ecx)
1151	TRACE_STAMP(%edi)
1152
1153	movl	%esp, %ebp
1154
1155	movl	%esp, %ecx
1156	pushl	%ecx		/* arg0 = struct regs *rp */
1157	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
1158	addl	$4, %esp	/* pop arg0 */
1159
1160	jmp	_sys_rtt
1161	SET_SIZE(mcetrap)
1162
1163#endif
1164
1165	/*
1166	 * #XF
1167	 */
1168	ENTRY_NP(xmtrap)
	/* #XF (SIMD floating-point exception): no error code pushed by hw */
1169	TRAP_NOERR(T_SIMDFPE)	/* $19 */
1170	jmp	cmninttrap
1171	SET_SIZE(xmtrap)
1172
1173	ENTRY_NP(invaltrap)
	/* catch-all handler for vectors that should never be taken */
1174	TRAP_NOERR(30)		/* very invalid */
1175	jmp	cmntrap
1176	SET_SIZE(invaltrap)
1177
1178	ENTRY_NP(invalint)
	/* catch-all interrupt handler; routes via cmnint, not cmntrap */
1179	TRAP_NOERR(31)		/* even more so */
1180	jmp	cmnint
1181	SET_SIZE(invalint)
1182
1183	.globl	fasttable
1184
1185#if defined(__amd64)

/*
 * Fast-trap dispatcher: %eax holds the fast syscall number, used to
 * index fasttable[].  An out-of-range number is converted into a fake
 * #GP whose error code encodes the fasttrap IDT slot.
 */
1186
1187	ENTRY_NP(fasttrap)
1188	cmpl	$T_LASTFAST, %eax
1189	ja	1f
1190	orl	%eax, %eax	/* (zero extend top 32-bits) */
1191	leaq	fasttable(%rip), %r11
1192	leaq	(%r11, %rax, CLONGSIZE), %r11
1193	jmp	*(%r11)
11941:
1195	/*
1196	 * Fast syscall number was illegal.  Make it look
1197	 * as if the INT failed.  Modify %rip to point before the
1198	 * INT, push the expected error code and fake a GP fault.
1199	 *
1200	 * XXX Why make the error code be offset into idt + 1?
1201	 * Instead we should push a real (soft?) error code
1202	 * on the stack and #gp handler could know about fasttraps?
1203	 */
1204	XPV_TRAP_POP
1205
1206	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
1207	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1208
1209#if defined(__xpv)
	/* re-push %r11/%rcx so gptrap's XPV_TRAP_POP finds them */
1210	pushq	%r11
1211	pushq	%rcx
1212#endif
1213	jmp	gptrap
1214	SET_SIZE(fasttrap)
1215
1216#elif defined(__i386)
1217
1218	ENTRY_NP(fasttrap)
1219	cmpl	$T_LASTFAST, %eax
1220	ja	1f
1221	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
12221:
1223	/*
1224	 * Fast syscall number was illegal.  Make it look
1225	 * as if the INT failed.  Modify %eip to point before the
1226	 * INT, push the expected error code and fake a GP fault.
1227	 *
1228	 * XXX Why make the error code be offset into idt + 1?
1229	 * Instead we should push a real (soft?) error code
1230	 * on the stack and #gp handler could know about fasttraps?
1231	 */
1232	subl	$2, (%esp)	/* XXX int insn 2-bytes */
1233	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1234	jmp	gptrap
1235	SET_SIZE(fasttrap)
1236
1237#endif	/* __i386 */
1238
	/*
	 * Soft trap reserved for DTrace (T_DTRACE_RET); pushes the trap
	 * type and a dummy error code, then hands off to dtrace_trap.
	 */
	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)
1243
#if defined(__amd64)

	/*
	 * Null fast trap: return immediately with the carry bit set in
	 * the caller's saved flags (presumably so the caller can detect
	 * that no work was done -- confirm against libc's fasttrap
	 * stubs).
	 *
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	XPV_TRAP_POP
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	IRET
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	/*
	 * i386 flavor: EFLAGS sits 8 bytes up the iret frame
	 * (%eip, %cs, %eflags).
	 */
	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */
1265
1266	/*
1267	 * Interrupts start at 32
1268	 */
1269#define MKIVCT(n)			\
1270	ENTRY_NP(ivct/**/n)		\
1271	push	$0;			\
1272	push	$n - 0x20;		\
1273	jmp	cmnint;			\
1274	SET_SIZE(ivct/**/n)
1275
/*
	 * Generate the entry points for hardware interrupt vectors
	 * 32 (0x20) through 255, one MKIVCT expansion per vector.
	 */
	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)
1500
1501#endif	/* __lint */
1502