/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/trap.h>
#include <sys/psw.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/dtrace.h>
#include <sys/x86_archext.h>
#include <sys/traptrace.h>
#include <sys/machparam.h>

/*
 * only one routine in this file is interesting to lint
 */

#if defined(__lint)

void
ndptrap_frstor(void)
{}

#else

#include "assym.h"

/*
 * Push $0 on the stack for traps that do not generate an error
 * code, so that the rest of the kernel can expect a consistent
 * stack layout from any exception.
 *
 * Note that under the hypervisor every amd64 exception also has
 * %r11 and %rcx on the stack.  Just pop them back into their
 * appropriate registers and let the frame be saved as it would be
 * when running native.
 */

#if defined(__xpv) && defined(__amd64)

#define	NPTRAP_NOERR(trapno)	\
	pushq	$0;		\
	pushq	$trapno

#define	TRAP_NOERR(trapno)	\
	XPV_TRAP_POP;		\
	NPTRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	XPV_TRAP_POP;		\
	pushq	$trapno

#else /* __xpv && __amd64 */

#define	TRAP_NOERR(trapno)	\
	push	$0;		\
	push	$trapno

#define	NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)

/*
 * error code already pushed by hw
 * onto stack.
 */
#define	TRAP_ERR(trapno)	\
	push	$trapno

#endif	/* __xpv && __amd64 */

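/*
 * For illustration only (not assembled): on native amd64,
 * TRAP_NOERR(T_ZERODIV) expands to
 *
 *	push	$0		(dummy error code)
 *	push	$T_ZERODIV	(trap number)
 *
 * while TRAP_ERR() pushes only the trap number because the hardware
 * has already pushed an error code.  Either way, every handler below
 * reaches cmntrap with the same { trapno, err, ... } stack layout.
 */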

	/*
	 * #DE
	 */
	ENTRY_NP(div0trap)
	TRAP_NOERR(T_ZERODIV)	/* $0 */
	jmp	cmntrap
	SET_SIZE(div0trap)

	/*
	 * #DB
	 *
	 * Fetch %dr6 and clear it, handing off the value to the
	 * cmntrap code in %r15/%esi
	 */
	ENTRY_NP(dbgtrap)
	TRAP_NOERR(T_SGLSTP)	/* $1 */

#if defined(__amd64)
#if !defined(__xpv)		/* no sysenter support yet */
	/*
	 * If we get here as a result of single-stepping a sysenter
	 * instruction, we suddenly find ourselves taking a #db
	 * in kernel mode -before- we've swapgs'ed.  So before we can
	 * take the trap, we do the swapgs here, and fix the return
	 * %rip in trap() so that we return immediately after the
	 * swapgs in the sysenter handler to avoid doing the swapgs again.
	 *
	 * Nobody said that the design of sysenter was particularly
	 * elegant, did they?
	 */

	pushq	%r11

	/*
	 * At this point the stack looks like this:
	 *
	 * (high address) 	r_ss
	 *			r_rsp
	 *			r_rfl
	 *			r_cs
	 *			r_rip		<-- %rsp + 24
	 *			r_err		<-- %rsp + 16
	 *			r_trapno	<-- %rsp + 8
	 * (low address)	%r11		<-- %rsp
	 */
	leaq	sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	je	1f
	leaq	brand_sys_sysenter(%rip), %r11
	cmpq	%r11, 24(%rsp)	/* Compare to saved r_rip on the stack */
	jne	2f
1:	SWAPGS
2:	popq	%r11
#endif	/* !__xpv */

	INTR_PUSH
#if defined(__xpv)
	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, %r15		/* %db6 -> %r15 */
	movl	$6, %edi
	movl	$0, %esi
	call	kdi_dreg_set		/* 0 -> %db6 */
#else
	movq	%db6, %r15
	xorl	%eax, %eax
	movq	%rax, %db6
#endif

#elif defined(__i386)

	INTR_PUSH
#if defined(__xpv)
	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, %esi		/* %dr6 -> %esi */
	pushl	$0
	pushl	$6
	call	kdi_dreg_set		/* 0 -> %dr6 */
	addl	$8, %esp
#else
	movl	%db6, %esi
	xorl	%eax, %eax
	movl	%eax, %db6
#endif
#endif	/* __i386 */

	jmp	cmntrap_pushed
	SET_SIZE(dbgtrap)

#if defined(__amd64)
#if !defined(__xpv)

/*
 * Macro to set the gsbase or kgsbase to the address of the struct cpu
 * for this processor.  If we came from userland, set kgsbase else
 * set gsbase.  We find the proper cpu struct by looping through
 * the cpu structs for all processors till we find a match for the gdt
 * of the trapping processor.  The stack is expected to be pointing at
 * the standard regs pushed by hardware on a trap (plus error code and trapno).
 */
#define	SET_CPU_GSBASE							\
	subq	$REGOFF_TRAPNO, %rsp;	/* save regs */			\
	movq	%rax, REGOFF_RAX(%rsp);					\
	movq	%rbx, REGOFF_RBX(%rsp);					\
	movq	%rcx, REGOFF_RCX(%rsp);					\
	movq	%rdx, REGOFF_RDX(%rsp);					\
	movq	%rbp, REGOFF_RBP(%rsp);					\
	movq	%rsp, %rbp;						\
	subq	$16, %rsp;		/* space for gdt */		\
	sgdt	6(%rsp);						\
	movq	8(%rsp), %rcx;		/* %rcx has gdt to match */	\
	xorl	%ebx, %ebx;		/* loop index */		\
	leaq	cpu(%rip), %rdx;	/* cpu pointer array */		\
1:									\
	movq	(%rdx, %rbx, CLONGSIZE), %rax;	/* get cpu[i] */	\
	cmpq	$0x0, %rax;		/* cpu[i] == NULL ? */		\
	je	2f;			/* yes, continue */		\
	cmpq	%rcx, CPU_GDT(%rax);	/* gdt == cpu[i]->cpu_gdt ? */	\
	je	3f;			/* yes, go set gsbase */	\
2:									\
	incl	%ebx;			/* i++ */			\
	cmpl	$NCPU, %ebx;		/* i < NCPU ? */		\
	jb	1b;			/* yes, loop */			\
/* XXX BIG trouble if we fall thru here.  We didn't find a gdt match */	\
3:									\
	movl	$MSR_AMD_KGSBASE, %ecx;					\
	cmpw	$KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */	\
	jne	4f;			/* no, go set KGSBASE */	\
	movl	$MSR_AMD_GSBASE, %ecx;	/* yes, set GSBASE */		\
	mfence;				/* OPTERON_ERRATUM_88 */	\
4:									\
	movq	%rax, %rdx;		/* write base register */	\
	shrq	$32, %rdx;						\
	wrmsr;								\
	movq	REGOFF_RDX(%rbp), %rdx;	/* restore regs */		\
	movq	REGOFF_RCX(%rbp), %rcx;					\
	movq	REGOFF_RBX(%rbp), %rbx;					\
	movq	REGOFF_RAX(%rbp), %rax;					\
	movq	%rbp, %rsp;						\
	movq	REGOFF_RBP(%rsp), %rbp;					\
	addq	$REGOFF_TRAPNO, %rsp	/* pop stack */

#else	/* __xpv */

#define	SET_CPU_GSBASE	/* noop on the hypervisor */

#endif	/* __xpv */
#endif	/* __amd64 */
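
/*
 * A rough C sketch of what SET_CPU_GSBASE does above (illustration
 * only; the real code cannot call C because no usable %gs is set up
 * yet, and it must preserve every register it touches):
 *
 *	gdt = sgdt().base;
 *	for (i = 0; i < NCPU; i++) {
 *		if (cpu[i] != NULL && cpu[i]->cpu_gdt == gdt) {
 *			if (trapped_from_kernel)
 *				wrmsr(MSR_AMD_GSBASE, (uint64_t)cpu[i]);
 *			else
 *				wrmsr(MSR_AMD_KGSBASE, (uint64_t)cpu[i]);
 *			break;
 *		}
 *	}
 */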


#if defined(__amd64)

	/*
	 * #NMI
	 *
	 * XXPV: See 6532669.
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	SET_CPU_GSBASE

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
	TRACE_REGS(%r12, %rsp, %rax, %rbx)
	TRACE_STAMP(%r12)

	movq	%rsp, %rbp

	movq	%rbp, %rdi
	call	av_dispatch_nmivect

	INTR_POP
	IRET
	/*NOTREACHED*/
	SET_SIZE(nmiint)

#elif defined(__i386)

	/*
	 * #NMI
	 */
	ENTRY_NP(nmiint)
	TRAP_NOERR(T_NMIFLT)	/* $2 */

	/*
	 * Save all registers and setup segment registers
	 * with kernel selectors.
	 */
	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	pushl	%ebp
	call	av_dispatch_nmivect
	addl	$4, %esp

	INTR_POP_USER
	IRET
	SET_SIZE(nmiint)

#endif	/* __i386 */

	/*
	 * #BP
	 */
	ENTRY_NP(brktrap)

#if defined(__amd64)
	XPV_TRAP_POP
	cmpw	$KCS_SEL, 8(%rsp)
	jne	bp_user

	/*
	 * This is a breakpoint in the kernel -- it is very likely that this
	 * is DTrace-induced.  To unify DTrace handling, we spoof this as an
	 * invalid opcode (#UD) fault.  Note that #BP is a trap, not a fault --
	 * we must decrement the trapping %rip to make it appear as a fault.
	 * We then push a non-zero error code to indicate that this is coming
	 * from #BP.
	 */
	decq	(%rsp)
	push	$1			/* error code -- non-zero for #BP */
	jmp	ud_kernel

bp_user:
#endif /* __amd64 */

	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	dtrace_trap

	SET_SIZE(brktrap)

	/*
	 * #OF
	 */
	ENTRY_NP(ovflotrap)
	TRAP_NOERR(T_OVFLW)	/* $4 */
	jmp	cmntrap
	SET_SIZE(ovflotrap)

	/*
	 * #BR
	 */
	ENTRY_NP(boundstrap)
	TRAP_NOERR(T_BOUNDFLT)	/* $5 */
	jmp	cmntrap
	SET_SIZE(boundstrap)

#if defined(__amd64)

	ENTRY_NP(invoptrap)

	XPV_TRAP_POP

	cmpw	$KCS_SEL, 8(%rsp)
	jne	ud_user

#if defined(__xpv)
	movb	$0, 12(%rsp)		/* clear saved upcall_mask from %cs */
#endif
	push	$0			/* error code -- zero for #UD */
ud_kernel:
	push	$0xdddd			/* a dummy trap number */
	INTR_PUSH
	movq	REGOFF_RIP(%rsp), %rdi
	movq	REGOFF_RSP(%rsp), %rsi
	movq	REGOFF_RAX(%rsp), %rdx
	pushq	(%rsi)
	movq	%rsp, %rsi
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addq	$8, %rsp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	ud_push
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	ud_leave
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	ud_nop
	cmpl	$DTRACE_INVOP_RET, %eax
	je	ud_ret
	jmp	ud_trap

ud_push:
	/*
	 * We must emulate a "pushq %rbp".  To do this, we pull the stack
	 * down 8 bytes, and then store the base pointer.
	 */
	INTR_POP
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

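	/*
	 * Net effect of the shuffle above, for illustration: the iretq
	 * frame is copied down 16 bytes so that a new frame can be built
	 * in which
	 *
	 *	RIP' = RIP + 1		(skip the 1-byte patched instruction)
	 *	RSP' = RSP - 8		(as if "pushq %rbp" had executed)
	 *	*RSP' = %rbp
	 *
	 * i.e. the interrupted code resumes exactly as though the original
	 * "pushq %rbp" had run.
	 */
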
ud_leave:
	/*
	 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
	 * followed by a "popq %rbp".  This is quite a bit simpler on amd64
	 * than it is on i386 -- we can exploit the fact that the %rsp is
	 * explicitly saved to effect the pop without having to reshuffle
	 * the other data pushed for the trap.
	 */
	INTR_POP
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	addq	$1, %rax		/* increment over trapping instr */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

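	/*
	 * Net effect of the emulation above, for illustration:
	 *
	 *	RIP' = RIP + 1		(skip the patched instruction)
	 *	%rbp' = *(%rbp)		(the "popq %rbp" half of leave)
	 *	RSP' = %rbp + 8		(the "movq %rbp, %rsp" half, plus the pop)
	 *
	 * which is what "leave" itself would have left behind.
	 */
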
ud_nop:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %rip by one.
	 */
	INTR_POP
	incq	(%rsp)
	IRET
	/*NOTREACHED*/

ud_ret:
	INTR_POP
	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	IRET				/* return from interrupt */
	/*NOTREACHED*/

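	/*
	 * For illustration, ud_ret above emulates a "ret":
	 *
	 *	RIP' = *RSP		(pop the return address)
	 *	RSP' = RSP + 8
	 *
	 * by rewriting the saved RIP and RSP in the iretq frame before
	 * returning.
	 */
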
ud_trap:
	/*
	 * We're going to let the kernel handle this as a normal #UD.  If,
	 * however, we came through #BP and are spoofing #UD (in this case,
	 * the stored error value will be non-zero), we need to de-spoof
	 * the trap by incrementing %rip and pushing T_BPTFLT.
	 */
	cmpq	$0, REGOFF_ERR(%rsp)
	je	ud_ud
	incq	REGOFF_RIP(%rsp)
	addq	$REGOFF_RIP, %rsp
	NPTRAP_NOERR(T_BPTFLT)	/* $3 */
	jmp	cmntrap

ud_ud:
	addq	$REGOFF_RIP, %rsp
ud_user:
	NPTRAP_NOERR(T_ILLINST)
	jmp	cmntrap
	SET_SIZE(invoptrap)

#elif defined(__i386)

	/*
	 * #UD
	 */
	ENTRY_NP(invoptrap)
	/*
	 * If we are taking an invalid opcode trap while in the kernel, this
	 * is likely an FBT probe point.
	 */
	pushl	%gs
	cmpw	$KGS_SEL, (%esp)
	jne	8f

	addl	$4, %esp
#if defined(__xpv)
	movb	$0, 6(%esp)		/* clear saved upcall_mask from %cs */
#endif	/* __xpv */
	pusha
	pushl	%eax			/* push %eax -- may be return value */
	pushl	%esp			/* push stack pointer */
	addl	$48, (%esp)		/* adjust to incoming args */
	pushl	40(%esp)		/* push calling EIP */
	call	dtrace_invop
	ALTENTRY(dtrace_invop_callsite)
	addl	$12, %esp
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax
	je	1f
	cmpl	$DTRACE_INVOP_POPL_EBP, %eax
	je	2f
	cmpl	$DTRACE_INVOP_LEAVE, %eax
	je	3f
	cmpl	$DTRACE_INVOP_NOP, %eax
	je	4f
	jmp	7f
1:
	/*
	 * We must emulate a "pushl %ebp".  To do this, we pull the stack
	 * down 4 bytes, and then store the base pointer.
	 */
	popa
	subl	$4, %esp		/* make room for %ebp */
	pushl	%eax			/* push temp */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 4(%esp)		/* store calling EIP */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, 8(%esp)		/* store calling CS */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 12(%esp)		/* store calling EFLAGS */
	movl	%ebp, 16(%esp)		/* push %ebp */
	popl	%eax			/* pop off temp */
	jmp	_emul_done
2:
	/*
	 * We must emulate a "popl %ebp".  To do this, we do the opposite of
	 * the above:  we remove the %ebp from the stack, and squeeze up the
	 * saved state from the trap.
	 */
	popa
	pushl	%eax			/* push temp */
	movl	16(%esp), %ebp		/* pop %ebp */
	movl	12(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, 16(%esp)		/* store calling EFLAGS */
	movl	8(%esp), %eax		/* load calling CS */
	movl	%eax, 12(%esp)		/* store calling CS */
	movl	4(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, 8(%esp)		/* store calling EIP */
	popl	%eax			/* pop off temp */
	addl	$4, %esp		/* adjust stack pointer */
	jmp	_emul_done
3:
	/*
	 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
	 * followed by a "popl %ebp".  This looks similar to the above, but
	 * requires two temporaries:  one for the new base pointer, and one
	 * for the staging register.
	 */
	popa
	pushl	%eax			/* push temp */
	pushl	%ebx			/* push temp */
	movl	%ebp, %ebx		/* set temp to old %ebp */
	movl	(%ebx), %ebp		/* pop %ebp */
	movl	16(%esp), %eax		/* load calling EFLAGS */
	movl	%eax, (%ebx)		/* store calling EFLAGS */
	movl	12(%esp), %eax		/* load calling CS */
	movl	%eax, -4(%ebx)		/* store calling CS */
	movl	8(%esp), %eax		/* load calling EIP */
	incl	%eax			/* increment over LOCK prefix */
	movl	%eax, -8(%ebx)		/* store calling EIP */
	movl	%ebx, -4(%esp)		/* temporarily store new %esp */
	popl	%ebx			/* pop off temp */
	popl	%eax			/* pop off temp */
	movl	-12(%esp), %esp		/* set stack pointer */
	subl	$8, %esp		/* adjust for three pushes, one pop */
	jmp	_emul_done
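	/*
	 * Net effect of the leave emulation above (case 3), for
	 * illustration:
	 *
	 *	EIP' = EIP + 1		(skip the patched LOCK prefix)
	 *	%ebp' = *(%ebp)		(the "popl %ebp" half of leave)
	 *	ESP' = %ebp + 4		(the "movl %ebp, %esp" half, plus the pop)
	 *
	 * The EIP/CS/EFLAGS frame is rebuilt just below the new stack
	 * pointer so that the iret at _emul_done lands there.
	 */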
4:
	/*
	 * We must emulate a "nop".  This is obviously not hard:  we need only
	 * advance the %eip by one.
	 */
	popa
	incl	(%esp)
_emul_done:
	IRET				/* return from interrupt */
7:
	popa
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
8:
	addl	$4, %esp
	pushl	$0
	pushl	$T_ILLINST	/* $6 */
	jmp	cmntrap
	SET_SIZE(invoptrap)

#endif	/* __i386 */

#if defined(__amd64)

	/*
	 * #NM
	 */
#if defined(__xpv)

	ENTRY_NP(ndptrap)
	/*
	 * (On the hypervisor we must make a hypercall so we might as well
	 * save everything and handle as in a normal trap.)
	 */
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	INTR_PUSH

	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
	LOADCPU(%rax)			/* swapgs handled in hypervisor */
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
	movl	$FPU_EN, %eax
	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
	testq	%rbx, %rbx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%rbx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rbx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl  _patch_xrstorq_rbx
_patch_xrstorq_rbx:
	FXRSTORQ	((%rbx))
	cmpw	$KCS_SEL, REGOFF_CS(%rsp)
	je	.return_to_kernel

	ASSERT_UPCALL_MASK_IS_SET
	USER_POP
	IRET				/* return to user mode */
	/*NOTREACHED*/

.return_to_kernel:
	INTR_POP
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	INTR_POP
	pushq	$0			/* can not use TRAP_NOERR */
	pushq	$T_NOEXTFLT
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#else	/* __xpv */

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in ndptrap_frstor
	 * below; for all other cases, we let the trap code handle it
	 */
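	/*
	 * Rough C sketch of the fast path below (illustration only;
	 * field names are approximate and the real code avoids building
	 * a full trap frame for speed):
	 *
	 *	if (fpu_exists && (lwp = curthread->t_lwp) != NULL &&
	 *	    (lwp->lwp_pcb.pcb_fpu.fpu_flags & FPU_EN)) {
	 *		clts();
	 *		lwp->lwp_pcb.pcb_fpu.fpu_flags &= ~FPU_VALID;
	 *		xrstor or fxrstor from lwp->lwp_pcb.pcb_fpu;
	 *		iretq straight back to the interrupted context;
	 *	} else {
	 *		take the slow path through cmninttrap;
	 *	}
	 */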
	pushq	%rax
	pushq	%rbx
	cmpw	$KCS_SEL, 24(%rsp)	/* did we come from kernel mode? */
	jne	1f
	LOADCPU(%rax)			/* if yes, don't swapgs */
	jmp	2f
1:
	SWAPGS				/* if from user, need swapgs */
	LOADCPU(%rax)
	SWAPGS
2:
	/*
	 * Xrstor needs to use edx as part of its flag.
	 * NOTE: have to push rdx after "cmpw ...24(%rsp)", otherwise rsp+$24
	 * will not point to CS.
	 */
	pushq	%rdx
	cmpl	$0, fpu_exists(%rip)
	je	.handle_in_trap		/* let trap handle no fp case */
	movq	CPU_THREAD(%rax), %rbx	/* %rbx = curthread */
	movl	$FPU_EN, %eax
	movq	T_LWP(%rbx), %rbx	/* %rbx = lwp */
	testq	%rbx, %rbx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU	!= 0
	addq	$LWP_PCB_FPU, %rbx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%rbx)
	jz	.handle_in_trap		/* must be the first fault */
	clts
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%rbx)
#if FPU_CTX_FPU_REGS != 0
	addq	$FPU_CTX_FPU_REGS, %rbx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%rbx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%rbx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in
	 * kernel due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl  _patch_xrstorq_rbx
_patch_xrstorq_rbx:
	FXRSTORQ	((%rbx))
	popq	%rdx
	popq	%rbx
	popq	%rax
	IRET
	/*NOTREACHED*/

.handle_in_trap:
	popq	%rdx
	popq	%rbx
	popq	%rax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __xpv */

#elif defined(__i386)

	ENTRY_NP(ndptrap)
	/*
	 * We want to do this quickly as every lwp using fp will take this
	 * after a context switch -- we do the frequent path in fpnoextflt
	 * below; for all other cases, we let the trap code handle it
	 */
	pushl	%eax
	pushl	%ebx
	pushl	%edx			/* for xrstor */
	pushl	%ds
	pushl	%gs
	movl	$KDS_SEL, %ebx
	movw	%bx, %ds
	movl	$KGS_SEL, %eax
	movw	%ax, %gs
	LOADCPU(%eax)
	cmpl	$0, fpu_exists
	je	.handle_in_trap		/* let trap handle no fp case */
	movl	CPU_THREAD(%eax), %ebx	/* %ebx = curthread */
	movl	$FPU_EN, %eax
	movl	T_LWP(%ebx), %ebx	/* %ebx = lwp */
	testl	%ebx, %ebx
	jz	.handle_in_trap		/* should not happen? */
#if LWP_PCB_FPU != 0
	addl	$LWP_PCB_FPU, %ebx	/* &lwp->lwp_pcb.pcb_fpu */
#endif
	testl	%eax, PCB_FPU_FLAGS(%ebx)
	jz	.handle_in_trap		/* must be the first fault */
	CLTS
	andl	$_BITNOT(FPU_VALID), PCB_FPU_FLAGS(%ebx)
#if FPU_CTX_FPU_REGS != 0
	addl	$FPU_CTX_FPU_REGS, %ebx
#endif

	movl	FPU_CTX_FPU_XSAVE_MASK(%ebx), %eax	/* for xrstor */
	movl	FPU_CTX_FPU_XSAVE_MASK+4(%ebx), %edx	/* for xrstor */

	/*
	 * the label below is used in trap.c to detect FP faults in kernel
	 * due to user fault.
	 */
	ALTENTRY(ndptrap_frstor)
	.globl  _patch_fxrstor_ebx
_patch_fxrstor_ebx:
	.globl  _patch_xrstor_ebx
_patch_xrstor_ebx:
	frstor	(%ebx)		/* may be patched to fxrstor */
	nop			/* (including this byte) */
	popl	%gs
	popl	%ds
	popl	%edx
	popl	%ebx
	popl	%eax
	IRET

.handle_in_trap:
	popl	%gs
	popl	%ds
	popl	%edx
	popl	%ebx
	popl	%eax
	TRAP_NOERR(T_NOEXTFLT)	/* $7 */
	jmp	cmninttrap
	SET_SIZE(ndptrap_frstor)
	SET_SIZE(ndptrap)

#endif	/* __i386 */

#if !defined(__xpv)
#if defined(__amd64)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	pushq	$T_DBLFLT
	SET_CPU_GSBASE

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */
	pushq	%rax
	subq	$DESCTBR_SIZE, %rsp
	sidt	(%rsp)
	movq	%gs:CPU_IDT, %rax
	cmpq	%rax, DTR_BASE(%rsp)
	je	1f

	movq	%rax, DTR_BASE(%rsp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
	lidt	(%rsp)

	movl	$1, nopanicdebug

1:	addq	$DESCTBR_SIZE, %rsp
	popq	%rax

	DFTRAP_PUSH

	/*
	 * freeze trap trace.
	 */
#ifdef TRAPTRACE
	leaq	trap_trace_freeze(%rip), %r11
	incl	(%r11)
#endif

	ENABLE_INTR_FLAGS

	movq	%rsp, %rdi	/* &regs */
	xorl	%esi, %esi	/* clear address */
	xorl	%edx, %edx	/* cpuid = 0 */
	call	trap

	SET_SIZE(syserrtrap)

#elif defined(__i386)

	/*
	 * #DF
	 */
	ENTRY_NP(syserrtrap)
	cli				/* disable interrupts */

	/*
	 * We share this handler with kmdb (if kmdb is loaded).  As such, we
	 * may have reached this point after encountering a #df in kmdb.  If
	 * that happens, we'll still be on kmdb's IDT.  We need to switch back
	 * to this CPU's IDT before proceeding.  Furthermore, if we did arrive
	 * here from kmdb, kmdb is probably in a very sickly state, and
	 * shouldn't be entered from the panic flow.  We'll suppress that
	 * entry by setting nopanicdebug.
	 */

	subl	$DESCTBR_SIZE, %esp
	movl	%gs:CPU_IDT, %eax
	sidt	(%esp)
	cmpl	DTR_BASE(%esp), %eax
	je	1f

	movl	%eax, DTR_BASE(%esp)
	movw	$_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
	lidt	(%esp)

	movl	$1, nopanicdebug

1:	addl	$DESCTBR_SIZE, %esp

	/*
	 * Check the CPL in the TSS to see what mode
	 * (user or kernel) we took the fault in.  At this
	 * point we are running in the context of the double
	 * fault task (dftss) but the CPU's task points to
	 * the previous task (ktss) where the process context
	 * has been saved as the result of the task switch.
	 */
	movl	%gs:CPU_TSS, %eax	/* get the TSS */
	movl	TSS_SS(%eax), %ebx	/* save the fault SS */
	movl	TSS_ESP(%eax), %edx	/* save the fault ESP */
	testw	$CPL_MASK, TSS_CS(%eax)	/* user mode ? */
	jz	make_frame
	movw	TSS_SS0(%eax), %ss	/* get on the kernel stack */
	movl	TSS_ESP0(%eax), %esp

	/*
	 * Clear the NT flag to avoid a task switch when the process
	 * finally pops the EFL off the stack via an iret.  Clear
	 * the TF flag since that is what the processor does for
	 * a normal exception. Clear the IE flag so that interrupts
	 * remain disabled.
	 */
	movl	TSS_EFL(%eax), %ecx
	andl	$_BITNOT(PS_NT|PS_T|PS_IE), %ecx
	pushl	%ecx
	popfl				/* restore the EFL */
	movw	TSS_LDT(%eax), %cx	/* restore the LDT */
	lldt	%cx

	/*
	 * Restore process segment selectors.
	 */
	movw	TSS_DS(%eax), %ds
	movw	TSS_ES(%eax), %es
	movw	TSS_FS(%eax), %fs
	movw	TSS_GS(%eax), %gs

	/*
	 * Restore task segment selectors.
	 */
	movl	$KDS_SEL, TSS_DS(%eax)
	movl	$KDS_SEL, TSS_ES(%eax)
	movl	$KDS_SEL, TSS_SS(%eax)
	movl	$KFS_SEL, TSS_FS(%eax)
	movl	$KGS_SEL, TSS_GS(%eax)

	/*
	 * Clear the TS bit, the busy bits in both task
	 * descriptors, and switch tasks.
	 */
	clts
	leal	gdt0, %ecx
	movl	DFTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, DFTSS_SEL+4(%ecx)
	movl	KTSS_SEL+4(%ecx), %esi
	andl	$_BITNOT(0x200), %esi
	movl	%esi, KTSS_SEL+4(%ecx)
	movw	$KTSS_SEL, %cx
	ltr	%cx

	/*
	 * Restore part of the process registers.
	 */
	movl	TSS_EBP(%eax), %ebp
	movl	TSS_ECX(%eax), %ecx
	movl	TSS_ESI(%eax), %esi
	movl	TSS_EDI(%eax), %edi

make_frame:
	/*
	 * Make a trap frame.  Leave the error code (0) on
	 * the stack since the first word on a trap stack is
	 * unused anyway.
	 */
	pushl	%ebx			/ fault SS
	pushl	%edx			/ fault ESP
	pushl	TSS_EFL(%eax)		/ fault EFL
	pushl	TSS_CS(%eax)		/ fault CS
	pushl	TSS_EIP(%eax)		/ fault EIP
	pushl	$0			/ error code
	pushl	$T_DBLFLT		/ trap number 8
	movl	TSS_EBX(%eax), %ebx	/ restore EBX
	movl	TSS_EDX(%eax), %edx	/ restore EDX
	movl	TSS_EAX(%eax), %eax	/ restore EAX
	sti				/ enable interrupts
	jmp	cmntrap
	SET_SIZE(syserrtrap)

#endif	/* __i386 */
#endif	/* !__xpv */

	ENTRY_NP(overrun)
	push	$0
	TRAP_NOERR(T_EXTOVRFLT)	/* $9 i386 only - not generated */
	jmp	cmninttrap
	SET_SIZE(overrun)

	/*
	 * #TS
	 */
	ENTRY_NP(invtsstrap)
	TRAP_ERR(T_TSSFLT)	/* $10 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(invtsstrap)

	/*
	 * #NP
	 */
	ENTRY_NP(segnptrap)
	TRAP_ERR(T_SEGFLT)	/* $11 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(segnptrap)

	/*
	 * #SS
	 */
	ENTRY_NP(stktrap)
	TRAP_ERR(T_STKFLT)	/* $12 already have error code on stack */
	jmp	cmntrap
	SET_SIZE(stktrap)

	/*
	 * #GP
	 */
	ENTRY_NP(gptrap)
	TRAP_ERR(T_GPFLT)	/* $13 already have error code on stack */
#if defined(__amd64)
	SET_CPU_GSBASE
#endif
	jmp	cmntrap
	SET_SIZE(gptrap)

	/*
	 * #PF
	 */
	ENTRY_NP(pftrap)
	TRAP_ERR(T_PGFLT)	/* $14 already have error code on stack */
	INTR_PUSH
#if defined(__xpv)

#if defined(__amd64)
	movq	%gs:CPU_VCPU_INFO, %r15
	movq	VCPU_INFO_ARCH_CR2(%r15), %r15	/* vcpu[].arch.cr2 */
#elif defined(__i386)
	movl	%gs:CPU_VCPU_INFO, %esi
	movl	VCPU_INFO_ARCH_CR2(%esi), %esi	/* vcpu[].arch.cr2 */
#endif	/* __i386 */

#else	/* __xpv */

#if defined(__amd64)
	movq	%cr2, %r15
#elif defined(__i386)
	movl	%cr2, %esi
#endif	/* __i386 */

#endif	/* __xpv */
	jmp	cmntrap_pushed
	SET_SIZE(pftrap)

#if !defined(__amd64)

	.globl	idt0_default_r

	/*
	 * #PF pentium bug workaround
	 */
	ENTRY_NP(pentium_pftrap)
	pushl	%eax
	movl	%cr2, %eax
	andl	$MMU_STD_PAGEMASK, %eax

	cmpl	%eax, %cs:idt0_default_r+2	/* fixme */

	je	check_for_user_address
user_mode:
	popl	%eax
	pushl	$T_PGFLT	/* $14 */
	jmp	cmntrap
check_for_user_address:
	/*
	 * Before we assume that we have an unmapped trap on our hands,
	 * check to see if this is a fault from user mode.  If it is,
	 * we'll kick back into the page fault handler.
	 */
	movl	4(%esp), %eax	/* error code */
	andl	$PF_ERR_USER, %eax
	jnz	user_mode

	/*
	 * We now know that this is the invalid opcode trap.
	 */
	popl	%eax
	addl	$4, %esp	/* pop error code */
	jmp	invoptrap
	SET_SIZE(pentium_pftrap)

#endif	/* !__amd64 */

	ENTRY_NP(resvtrap)
	TRAP_NOERR(15)		/* (reserved)  */
	jmp	cmntrap
	SET_SIZE(resvtrap)

	/*
	 * #MF
	 */
	ENTRY_NP(ndperr)
	TRAP_NOERR(T_EXTERRFLT)	/* $16 */
	jmp	cmninttrap
	SET_SIZE(ndperr)

	/*
	 * #AC
	 */
	ENTRY_NP(achktrap)
	TRAP_ERR(T_ALIGNMENT)	/* $17 */
	jmp	cmntrap
	SET_SIZE(achktrap)

	/*
	 * #MC
	 */
	.globl	cmi_mca_trap	/* see uts/i86pc/os/cmi.c */

#if defined(__amd64)

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	SET_CPU_GSBASE

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
	TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
	TRACE_STAMP(%rdi)

	movq	%rsp, %rbp

	movq	%rsp, %rdi	/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp); */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#else

	ENTRY_NP(mcetrap)
	TRAP_NOERR(T_MCE)	/* $18 */

	INTR_PUSH
	INTGATE_INIT_KERNEL_FLAGS

	TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
	TRACE_REGS(%edi, %esp, %ebx, %ecx)
	TRACE_STAMP(%edi)

	movl	%esp, %ebp

	movl	%esp, %ecx
	pushl	%ecx		/* arg0 = struct regs *rp */
	call	cmi_mca_trap	/* cmi_mca_trap(rp) */
	addl	$4, %esp	/* pop arg0 */

	jmp	_sys_rtt
	SET_SIZE(mcetrap)

#endif

	/*
	 * #XF
	 */
	ENTRY_NP(xmtrap)
	TRAP_NOERR(T_SIMDFPE)	/* $19 */
	jmp	cmninttrap
	SET_SIZE(xmtrap)

	ENTRY_NP(invaltrap)
	TRAP_NOERR(30)		/* very invalid */
	jmp	cmntrap
	SET_SIZE(invaltrap)

	ENTRY_NP(invalint)
	TRAP_NOERR(31)		/* even more so */
	jmp	cmnint
	SET_SIZE(invalint)

	.globl	fasttable

#if defined(__amd64)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	orl	%eax, %eax	/* (zero extend top 32-bits) */
	leaq	fasttable(%rip), %r11
	leaq	(%r11, %rax, CLONGSIZE), %r11
	jmp	*(%r11)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %rip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	XPV_TRAP_POP

	subq	$2, (%rsp)	/* XXX int insn 2-bytes */
	pushq	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)

#if defined(__xpv)
	pushq	%r11
	pushq	%rcx
#endif
	jmp	gptrap
	SET_SIZE(fasttrap)

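	/*
	 * For illustration, the amd64 dispatch above (the i386 flavor
	 * below is equivalent) is simply
	 *
	 *	if (%eax <= T_LASTFAST)
	 *		goto fasttable[%eax];
	 *	else
	 *		fake a #GP as if the "int" instruction itself failed;
	 *
	 * where fasttable[] is the array of fast-trap handlers indexed by
	 * the fast system call number in %eax.
	 */
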
#elif defined(__i386)

	ENTRY_NP(fasttrap)
	cmpl	$T_LASTFAST, %eax
	ja	1f
	jmp	*%cs:fasttable(, %eax, CLONGSIZE)
1:
	/*
	 * Fast syscall number was illegal.  Make it look
	 * as if the INT failed.  Modify %eip to point before the
	 * INT, push the expected error code and fake a GP fault.
	 *
	 * XXX Why make the error code be offset into idt + 1?
	 * Instead we should push a real (soft?) error code
	 * on the stack and #gp handler could know about fasttraps?
	 */
	subl	$2, (%esp)	/* XXX int insn 2-bytes */
	pushl	$_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
	jmp	gptrap
	SET_SIZE(fasttrap)

#endif	/* __i386 */

	ENTRY_NP(dtrace_ret)
	TRAP_NOERR(T_DTRACE_RET)
	jmp	dtrace_trap
	SET_SIZE(dtrace_ret)

#if defined(__amd64)

	/*
	 * RFLAGS 24 bytes up the stack from %rsp.
	 * XXX a constant would be nicer.
	 */
	ENTRY_NP(fast_null)
	XPV_TRAP_POP
	orq	$PS_C, 24(%rsp)	/* set carry bit in user flags */
	IRET
	/*NOTREACHED*/
	SET_SIZE(fast_null)

#elif defined(__i386)

	ENTRY_NP(fast_null)
	orw	$PS_C, 8(%esp)	/* set carry bit in user flags */
	IRET
	SET_SIZE(fast_null)

#endif	/* __i386 */

	/*
	 * Interrupts start at 32
	 */
#define MKIVCT(n)			\
	ENTRY_NP(ivct/**/n)		\
	push	$0;			\
	push	$n - 0x20;		\
	jmp	cmnint;			\
	SET_SIZE(ivct/**/n)

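	/*
	 * For illustration, MKIVCT(32) expands to
	 *
	 *	ENTRY_NP(ivct32)
	 *	push	$0
	 *	push	$32 - 0x20
	 *	jmp	cmnint
	 *	SET_SIZE(ivct32)
	 *
	 * so each of the 224 interrupt vectors gets a tiny stub that pushes
	 * a dummy error code and its vector number (relative to the 0x20
	 * base) before joining the common interrupt path.
	 */
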
	MKIVCT(32)
	MKIVCT(33)
	MKIVCT(34)
	MKIVCT(35)
	MKIVCT(36)
	MKIVCT(37)
	MKIVCT(38)
	MKIVCT(39)
	MKIVCT(40)
	MKIVCT(41)
	MKIVCT(42)
	MKIVCT(43)
	MKIVCT(44)
	MKIVCT(45)
	MKIVCT(46)
	MKIVCT(47)
	MKIVCT(48)
	MKIVCT(49)
	MKIVCT(50)
	MKIVCT(51)
	MKIVCT(52)
	MKIVCT(53)
	MKIVCT(54)
	MKIVCT(55)
	MKIVCT(56)
	MKIVCT(57)
	MKIVCT(58)
	MKIVCT(59)
	MKIVCT(60)
	MKIVCT(61)
	MKIVCT(62)
	MKIVCT(63)
	MKIVCT(64)
	MKIVCT(65)
	MKIVCT(66)
	MKIVCT(67)
	MKIVCT(68)
	MKIVCT(69)
	MKIVCT(70)
	MKIVCT(71)
	MKIVCT(72)
	MKIVCT(73)
	MKIVCT(74)
	MKIVCT(75)
	MKIVCT(76)
	MKIVCT(77)
	MKIVCT(78)
	MKIVCT(79)
	MKIVCT(80)
	MKIVCT(81)
	MKIVCT(82)
	MKIVCT(83)
	MKIVCT(84)
	MKIVCT(85)
	MKIVCT(86)
	MKIVCT(87)
	MKIVCT(88)
	MKIVCT(89)
	MKIVCT(90)
	MKIVCT(91)
	MKIVCT(92)
	MKIVCT(93)
	MKIVCT(94)
	MKIVCT(95)
	MKIVCT(96)
	MKIVCT(97)
	MKIVCT(98)
	MKIVCT(99)
	MKIVCT(100)
	MKIVCT(101)
	MKIVCT(102)
	MKIVCT(103)
	MKIVCT(104)
	MKIVCT(105)
	MKIVCT(106)
	MKIVCT(107)
	MKIVCT(108)
	MKIVCT(109)
	MKIVCT(110)
	MKIVCT(111)
	MKIVCT(112)
	MKIVCT(113)
	MKIVCT(114)
	MKIVCT(115)
	MKIVCT(116)
	MKIVCT(117)
	MKIVCT(118)
	MKIVCT(119)
	MKIVCT(120)
	MKIVCT(121)
	MKIVCT(122)
	MKIVCT(123)
	MKIVCT(124)
	MKIVCT(125)
	MKIVCT(126)
	MKIVCT(127)
	MKIVCT(128)
	MKIVCT(129)
	MKIVCT(130)
	MKIVCT(131)
	MKIVCT(132)
	MKIVCT(133)
	MKIVCT(134)
	MKIVCT(135)
	MKIVCT(136)
	MKIVCT(137)
	MKIVCT(138)
	MKIVCT(139)
	MKIVCT(140)
	MKIVCT(141)
	MKIVCT(142)
	MKIVCT(143)
	MKIVCT(144)
	MKIVCT(145)
	MKIVCT(146)
	MKIVCT(147)
	MKIVCT(148)
	MKIVCT(149)
	MKIVCT(150)
	MKIVCT(151)
	MKIVCT(152)
	MKIVCT(153)
	MKIVCT(154)
	MKIVCT(155)
	MKIVCT(156)
	MKIVCT(157)
	MKIVCT(158)
	MKIVCT(159)
	MKIVCT(160)
	MKIVCT(161)
	MKIVCT(162)
	MKIVCT(163)
	MKIVCT(164)
	MKIVCT(165)
	MKIVCT(166)
	MKIVCT(167)
	MKIVCT(168)
	MKIVCT(169)
	MKIVCT(170)
	MKIVCT(171)
	MKIVCT(172)
	MKIVCT(173)
	MKIVCT(174)
	MKIVCT(175)
	MKIVCT(176)
	MKIVCT(177)
	MKIVCT(178)
	MKIVCT(179)
	MKIVCT(180)
	MKIVCT(181)
	MKIVCT(182)
	MKIVCT(183)
	MKIVCT(184)
	MKIVCT(185)
	MKIVCT(186)
	MKIVCT(187)
	MKIVCT(188)
	MKIVCT(189)
	MKIVCT(190)
	MKIVCT(191)
	MKIVCT(192)
	MKIVCT(193)
	MKIVCT(194)
	MKIVCT(195)
	MKIVCT(196)
	MKIVCT(197)
	MKIVCT(198)
	MKIVCT(199)
	MKIVCT(200)
	MKIVCT(201)
	MKIVCT(202)
	MKIVCT(203)
	MKIVCT(204)
	MKIVCT(205)
	MKIVCT(206)
	MKIVCT(207)
	MKIVCT(208)
	MKIVCT(209)
	MKIVCT(210)
	MKIVCT(211)
	MKIVCT(212)
	MKIVCT(213)
	MKIVCT(214)
	MKIVCT(215)
	MKIVCT(216)
	MKIVCT(217)
	MKIVCT(218)
	MKIVCT(219)
	MKIVCT(220)
	MKIVCT(221)
	MKIVCT(222)
	MKIVCT(223)
	MKIVCT(224)
	MKIVCT(225)
	MKIVCT(226)
	MKIVCT(227)
	MKIVCT(228)
	MKIVCT(229)
	MKIVCT(230)
	MKIVCT(231)
	MKIVCT(232)
	MKIVCT(233)
	MKIVCT(234)
	MKIVCT(235)
	MKIVCT(236)
	MKIVCT(237)
	MKIVCT(238)
	MKIVCT(239)
	MKIVCT(240)
	MKIVCT(241)
	MKIVCT(242)
	MKIVCT(243)
	MKIVCT(244)
	MKIVCT(245)
	MKIVCT(246)
	MKIVCT(247)
	MKIVCT(248)
	MKIVCT(249)
	MKIVCT(250)
	MKIVCT(251)
	MKIVCT(252)
	MKIVCT(253)
	MKIVCT(254)
	MKIVCT(255)

#endif	/* __lint */