xref: /titanic_41/usr/src/uts/intel/ia32/os/desctbls.c (revision 5a59a8b3d86e67dbe75588879c46e3629f40efec)
1 /*
2  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 #pragma ident	"%Z%%M%	%I%	%E% SMI"
7 
8 /*
9  * Copyright (c) 1992 Terrence R. Lambert.
10  * Copyright (c) 1990 The Regents of the University of California.
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to Berkeley by
14  * William Jolitz.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. All advertising materials mentioning features or use of this software
25  *    must display the following acknowledgement:
26  *	This product includes software developed by the University of
27  *	California, Berkeley and its contributors.
28  * 4. Neither the name of the University nor the names of its contributors
29  *    may be used to endorse or promote products derived from this software
30  *    without specific prior written permission.
31  *
32  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42  * SUCH DAMAGE.
43  *
44  *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
45  */
46 
47 #include <sys/types.h>
48 #include <sys/tss.h>
49 #include <sys/segments.h>
50 #include <sys/trap.h>
51 #include <sys/cpuvar.h>
52 #include <sys/x86_archext.h>
53 #include <sys/archsystm.h>
54 #include <sys/machsystm.h>
55 #include <sys/kobj.h>
56 #include <sys/cmn_err.h>
57 #include <sys/reboot.h>
58 #include <sys/kdi.h>
59 #include <sys/systm.h>
60 
61 extern void syscall_int(void);
62 
63 /*
64  * cpu0 and default tables and structures.
65  */
66 #pragma	align	16(gdt0)
67 user_desc_t	gdt0[NGDT];		/* global descriptor table */
68 desctbr_t	gdt0_default_r;
69 
70 #pragma	align	16(idt0)
71 gate_desc_t	idt0[NIDT]; 		/* interrupt descriptor table */
72 desctbr_t	idt0_default_r;		/* describes idt0 in IDTR format */
73 
74 #pragma align	16(ktss0)
75 struct tss	ktss0;			/* kernel task state structure */
76 
77 #if defined(__i386)
78 #pragma align	16(dftss0)
79 struct tss	dftss0;			/* #DF double-fault exception */
80 #endif	/* __i386 */
81 
82 user_desc_t	zero_udesc;		/* base zero user desc native procs */
83 system_desc_t	zero_sdesc;
84 
85 #if defined(__amd64)
86 user_desc_t	zero_u32desc;		/* 32-bit compatibility procs */
87 #endif	/* __amd64 */
88 
89 #pragma	align	16(dblfault_stack0)
90 char		dblfault_stack0[DEFAULTSTKSZ];
91 
92 extern void	fast_null(void);
93 extern hrtime_t	get_hrtime(void);
94 extern hrtime_t	gethrvtime(void);
95 extern hrtime_t	get_hrestime(void);
96 extern uint64_t	getlgrp(void);
97 
98 void (*(fasttable[]))(void) = {
99 	fast_null,			/* T_FNULL routine */
100 	fast_null,			/* T_FGETFP routine (initially null) */
101 	fast_null,			/* T_FSETFP routine (initially null) */
102 	(void (*)())get_hrtime,		/* T_GETHRTIME */
103 	(void (*)())gethrvtime,		/* T_GETHRVTIME */
104 	(void (*)())get_hrestime,	/* T_GETHRESTIME */
105 	(void (*)())getlgrp		/* T_GETLGRP */
106 };
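/*
 * Illustrative sketch: user-level library routines reach these handlers
 * through the fast trap vector rather than a full system call. Roughly,
 * a gethrtime() fast path does
 *
 *	movl	$T_GETHRTIME, %eax
 *	int	$T_FASTTRAP
 *
 * and the fasttrap entry point (installed at vector 210 by init_idt()
 * below) dispatches through fasttable[] using the index in %eax, without
 * building a complete trap frame.
 */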
107 
108 /*
109  * software prototypes for default local descriptor table
110  */
111 
112 /*
113  * Routines for loading segment descriptors in format the hardware
114  * can understand.
115  */
116 
117 #if defined(__amd64)
118 
119 /*
120  * In long mode, code segments have the new L (long mode) attribute bit.
121  * Of the remaining attributes, only the conforming bit in the type field
122  * is used, along with the descriptor privilege level (DPL) and present
123  * bits. The default operand size bit must be zero in long mode. In 32-bit
124  * compatibility mode all fields are interpreted as in legacy mode. For
125  * data segments in long mode only the present bit is loaded.
126  */
127 void
128 set_usegd(user_desc_t *dp, uint_t lmode, void *base, size_t size,
129     uint_t type, uint_t dpl, uint_t gran, uint_t defopsz)
130 {
131 	ASSERT(lmode == SDP_SHORT || lmode == SDP_LONG);
132 
133 	/*
134 	 * 64-bit long mode.
135 	 */
136 	if (lmode == SDP_LONG)
137 		dp->usd_def32 = 0;		/* D bit must be 0 in 64-bit mode */
138 	else
139 		/*
140 		 * 32-bit compatibility mode.
141 		 */
142 		dp->usd_def32 = defopsz;	/* 0 = 16, 1 = 32-bit ops */
143 
144 	dp->usd_long = lmode;	/* 64-bit mode */
145 	dp->usd_type = type;
146 	dp->usd_dpl = dpl;
147 	dp->usd_p = 1;
148 	dp->usd_gran = gran;		/* 0 = bytes, 1 = pages */
149 
150 	dp->usd_lobase = (uintptr_t)base;
151 	dp->usd_midbase = (uintptr_t)base >> 16;
152 	dp->usd_hibase = (uintptr_t)base >> (16 + 8);
153 	dp->usd_lolimit = size;
154 	dp->usd_hilimit = (uintptr_t)size >> 16;
155 }
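/*
 * Worked example (illustrative, mirroring the GDT_UDATA setup in init_gdt()
 * below): a call such as
 *
 *	set_usegd(&desc, SDP_SHORT, NULL, 0xfffff, SDT_MEMRWA, SEL_UPL,
 *	    SDP_PAGES, SDP_OP32);
 *
 * leaves usd_lolimit = 0xffff and usd_hilimit = 0xf (a 4GB limit with page
 * granularity), all base fields zero, usd_long = 0 and usd_def32 = 1, i.e.
 * a conventional flat 32-bit read/write data segment. A non-zero base such
 * as 0x12345678 would be split into usd_lobase = 0x5678, usd_midbase = 0x34
 * and usd_hibase = 0x12.
 */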
156 
157 #elif defined(__i386)
158 
159 /*
160  * Install user segment descriptor for code and data.
161  */
162 void
163 set_usegd(user_desc_t *dp, void *base, size_t size, uint_t type,
164     uint_t dpl, uint_t gran, uint_t defopsz)
165 {
166 	dp->usd_lolimit = size;
167 	dp->usd_hilimit = (uintptr_t)size >> 16;
168 
169 	dp->usd_lobase = (uintptr_t)base;
170 	dp->usd_midbase = (uintptr_t)base >> 16;
171 	dp->usd_hibase = (uintptr_t)base >> (16 + 8);
172 
173 	dp->usd_type = type;
174 	dp->usd_dpl = dpl;
175 	dp->usd_p = 1;
176 	dp->usd_def32 = defopsz;	/* 0 = 16, 1 = 32 bit operands */
177 	dp->usd_gran = gran;		/* 0 = bytes, 1 = pages */
178 }
179 
180 #endif	/* __i386 */
181 
182 /*
183  * Install system segment descriptor for LDT and TSS segments.
184  */
185 
186 #if defined(__amd64)
187 
188 void
189 set_syssegd(system_desc_t *dp, void *base, size_t size, uint_t type,
190     uint_t dpl)
191 {
192 	dp->ssd_lolimit = size;
193 	dp->ssd_hilimit = (uintptr_t)size >> 16;
194 
195 	dp->ssd_lobase = (uintptr_t)base;
196 	dp->ssd_midbase = (uintptr_t)base >> 16;
197 	dp->ssd_hibase = (uintptr_t)base >> (16 + 8);
198 	dp->ssd_hi64base = (uintptr_t)base >> (16 + 8 + 8);
199 
200 	dp->ssd_type = type;
201 	dp->ssd_zero1 = 0;	/* must be zero */
202 	dp->ssd_zero2 = 0;
203 	dp->ssd_dpl = dpl;
204 	dp->ssd_p = 1;
205 	dp->ssd_gran = 0;	/* force byte units */
206 }
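/*
 * For a TSS or LDT at 64-bit virtual address "base", the address is split
 * as ssd_lobase = base[15:0], ssd_midbase = base[23:16], ssd_hibase =
 * base[31:24] and ssd_hi64base = base[63:32]. This is the 16-byte long mode
 * system descriptor format, so each such entry occupies two consecutive
 * 8-byte slots in the GDT.
 */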
207 
208 #elif defined(__i386)
209 
210 void
211 set_syssegd(system_desc_t *dp, void *base, size_t size, uint_t type,
212     uint_t dpl)
213 {
214 	dp->ssd_lolimit = size;
215 	dp->ssd_hilimit = (uintptr_t)size >> 16;
216 
217 	dp->ssd_lobase = (uintptr_t)base;
218 	dp->ssd_midbase = (uintptr_t)base >> 16;
219 	dp->ssd_hibase = (uintptr_t)base >> (16 + 8);
220 
221 	dp->ssd_type = type;
222 	dp->ssd_zero = 0;	/* must be zero */
223 	dp->ssd_dpl = dpl;
224 	dp->ssd_p = 1;
225 	dp->ssd_gran = 0;	/* force byte units */
226 }
227 
228 #endif	/* __i386 */
229 
230 /*
231  * Install gate segment descriptor for interrupt, trap, call and task gates.
232  */
233 
234 #if defined(__amd64)
235 
236 /*
237  * Note that stkcpy is replaced with the IST index; see the PRM for details.
238  */
239 void
240 set_gatesegd(gate_desc_t *dp, void (*func)(void), selector_t sel, uint_t ist,
241     uint_t type, uint_t dpl)
242 {
243 	dp->sgd_looffset = (uintptr_t)func;
244 	dp->sgd_hioffset = (uintptr_t)func >> 16;
245 	dp->sgd_hi64offset = (uintptr_t)func >> (16 + 16);
246 
247 	dp->sgd_selector =  (uint16_t)sel;
248 	dp->sgd_ist = ist;
249 	dp->sgd_type = type;
250 	dp->sgd_dpl = dpl;
251 	dp->sgd_p = 1;
252 }
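/*
 * For example, init_idt() below installs the double fault gate with
 * ist == 1:
 *
 *	set_gatesegd(&idt0[T_DBLFLT], &syserrtrap, KCS_SEL, 1, SDT_SYSIGT,
 *	    SEL_KPL);
 *
 * so on a #DF the processor unconditionally switches to the stack whose
 * top is held in ktss0.tss_ist1 (dblfault_stack0, set up in init_tss()).
 */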
253 
254 #elif defined(__i386)
255 
256 void
257 set_gatesegd(gate_desc_t *dp, void (*func)(void), selector_t sel,
258     uint_t wcount, uint_t type, uint_t dpl)
259 {
260 	dp->sgd_looffset = (uintptr_t)func;
261 	dp->sgd_hioffset = (uintptr_t)func >> 16;
262 
263 	dp->sgd_selector =  (uint16_t)sel;
264 	dp->sgd_stkcpy = wcount;
265 	dp->sgd_type = type;
266 	dp->sgd_dpl = dpl;
267 	dp->sgd_p = 1;
268 }
269 
270 #endif /* __i386 */
271 
272 /*
273  * Build kernel GDT.
274  */
275 
276 #if defined(__amd64)
277 
278 static void
279 init_gdt(void)
280 {
281 	desctbr_t	r_bgdt, r_gdt;
282 	user_desc_t	*bgdt;
283 	size_t		alen = 0xfffff;	/* entire 32-bit address space */
284 
285 	/*
286 	 * Copy in from boot's gdt to our gdt entries 1 - 4.
287 	 * Entry 0 is the null descriptor by definition.
288 	 */
289 	rd_gdtr(&r_bgdt);
290 	bgdt = (user_desc_t *)r_bgdt.dtr_base;
291 	if (bgdt == NULL)
292 		panic("null boot gdt");
293 
294 	gdt0[GDT_B32DATA] = bgdt[GDT_B32DATA];
295 	gdt0[GDT_B32CODE] = bgdt[GDT_B32CODE];
296 	gdt0[GDT_B64DATA] = bgdt[GDT_B64DATA];
297 	gdt0[GDT_B64CODE] = bgdt[GDT_B64CODE];
298 
299 	/*
300 	 * 64-bit kernel code segment.
301 	 */
302 	set_usegd(&gdt0[GDT_KCODE], SDP_LONG, NULL, 0, SDT_MEMERA, SEL_KPL,
303 	    SDP_PAGES, SDP_OP32);
304 
305 	/*
306 	 * 64-bit kernel data segment. The limit attribute is ignored in 64-bit
307 	 * mode, but we set it here to 0xFFFFF so that we can use the SYSRET
308 	 * instruction to return from system calls back to 32-bit applications.
309 	 * SYSRET doesn't update the base, limit, or attributes of the %ss or
310 	 * %ds descriptors, so we must ensure that whatever the kernel leaves
311 	 * there, even though the hardware ignores it, remains compatible with
312 	 * 32-bit apps. For the same reason we must set the default op size of
313 	 * this descriptor to 32-bit operands.
314 	 */
315 	set_usegd(&gdt0[GDT_KDATA], SDP_LONG, NULL, alen, SDT_MEMRWA,
316 	    SEL_KPL, SDP_PAGES, SDP_OP32);
317 	gdt0[GDT_KDATA].usd_def32 = 1;
318 
319 	/*
320 	 * 64-bit user code segment.
321 	 */
322 	set_usegd(&gdt0[GDT_UCODE], SDP_LONG, NULL, 0, SDT_MEMERA, SEL_UPL,
323 	    SDP_PAGES, SDP_OP32);
324 
325 	/*
326 	 * 32-bit user code segment.
327 	 */
328 	set_usegd(&gdt0[GDT_U32CODE], SDP_SHORT, NULL, alen, SDT_MEMERA,
329 	    SEL_UPL, SDP_PAGES, SDP_OP32);
330 
331 	/*
332 	 * 32 and 64 bit data segments can actually share the same descriptor.
333 	 * In long mode only the present bit is checked but all other fields
334 	 * are loaded. But in compatibility mode all fields are interpreted
335 	 * as in legacy mode so they must be set correctly for a 32-bit data
336 	 * segment.
337 	 */
338 	set_usegd(&gdt0[GDT_UDATA], SDP_SHORT, NULL, alen, SDT_MEMRWA, SEL_UPL,
339 	    SDP_PAGES, SDP_OP32);
340 
341 	/*
342 	 * The 64-bit kernel has no default LDT. By default, the LDT descriptor
343 	 * in the GDT is 0.
344 	 */
345 
346 	/*
347 	 * Kernel TSS
348 	 */
349 	set_syssegd((system_desc_t *)&gdt0[GDT_KTSS], &ktss0,
350 	    sizeof (ktss0) - 1, SDT_SYSTSS, SEL_KPL);
351 
352 	/*
353 	 * Initialize fs and gs descriptors for 32-bit processes.
354 	 * Only the attributes and limits are initialized; the effective
355 	 * base address is programmed via fsbase/gsbase.
356 	 */
357 	set_usegd(&gdt0[GDT_LWPFS], SDP_SHORT, NULL, alen, SDT_MEMRWA,
358 	    SEL_UPL, SDP_PAGES, SDP_OP32);
359 	set_usegd(&gdt0[GDT_LWPGS], SDP_SHORT, NULL, alen, SDT_MEMRWA,
360 	    SEL_UPL, SDP_PAGES, SDP_OP32);
361 
362 	/*
363 	 * Install our new GDT
364 	 */
365 	r_gdt.dtr_limit = sizeof (gdt0) - 1;
366 	r_gdt.dtr_base = (uintptr_t)gdt0;
367 	wr_gdtr(&r_gdt);
368 
369 	/*
370 	 * Initialize convenient zero base user descriptors for clearing
371 	 * lwp private %fs and %gs descriptors in GDT. See setregs() for
372 	 * an example.
373 	 */
374 	set_usegd(&zero_udesc, SDP_LONG, 0, 0, SDT_MEMRWA, SEL_UPL,
375 	    SDP_BYTES, SDP_OP32);
376 	set_usegd(&zero_u32desc, SDP_SHORT, 0, -1, SDT_MEMRWA, SEL_UPL,
377 	    SDP_PAGES, SDP_OP32);
378 }
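/*
 * Selector arithmetic, for reference: a GDT selector encodes its slot as
 *
 *	selector = (index << 3) | TI | RPL
 *
 * with TI == 0 for GDT entries. The KCS_SEL, KDS_SEL and KTSS_SEL selectors
 * used in this file are formed this way from the corresponding GDT_*
 * indices, with the RPL set to the kernel or user privilege level as
 * appropriate.
 */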
379 
380 #elif defined(__i386)
381 
382 static void
383 init_gdt(void)
384 {
385 	desctbr_t	r_bgdt, r_gdt;
386 	user_desc_t	*bgdt;
387 
388 	/*
389 	 * Copy in from boot's gdt to our gdt entries 1 - 4.
390 	 * Entry 0 is the null descriptor by definition.
391 	 */
392 	rd_gdtr(&r_bgdt);
393 	bgdt = (user_desc_t *)r_bgdt.dtr_base;
394 	if (bgdt == NULL)
395 		panic("null boot gdt");
396 
397 	gdt0[GDT_BOOTFLAT] = bgdt[GDT_BOOTFLAT];
398 	gdt0[GDT_BOOTCODE] = bgdt[GDT_BOOTCODE];
399 	gdt0[GDT_BOOTCODE16] = bgdt[GDT_BOOTCODE16];
400 	gdt0[GDT_BOOTDATA] = bgdt[GDT_BOOTDATA];
401 
402 	/*
403 	 * Text and data for both kernel and user span entire 32 bit
404 	 * Text and data for both kernel and user span the entire 32-bit
405 	 */
406 
407 	/*
408 	 * kernel code segment.
409 	 */
410 	set_usegd(&gdt0[GDT_KCODE], NULL, -1, SDT_MEMERA, SEL_KPL, SDP_PAGES,
411 	    SDP_OP32);
412 
413 	/*
414 	 * kernel data segment.
415 	 */
416 	set_usegd(&gdt0[GDT_KDATA], NULL, -1, SDT_MEMRWA, SEL_KPL, SDP_PAGES,
417 	    SDP_OP32);
418 
419 	/*
420 	 * user code segment.
421 	 */
422 	set_usegd(&gdt0[GDT_UCODE], NULL, -1, SDT_MEMERA, SEL_UPL, SDP_PAGES,
423 	    SDP_OP32);
424 
425 	/*
426 	 * user data segment.
427 	 */
428 	set_usegd(&gdt0[GDT_UDATA], NULL, -1, SDT_MEMRWA, SEL_UPL, SDP_PAGES,
429 	    SDP_OP32);
430 
431 	/*
432 	 * TSS for T_DBLFLT (double fault) handler
433 	 */
434 	set_syssegd((system_desc_t *)&gdt0[GDT_DBFLT], &dftss0,
435 	    sizeof (dftss0) - 1, SDT_SYSTSS, SEL_KPL);
436 
437 	/*
438 	 * TSS for kernel
439 	 */
440 	set_syssegd((system_desc_t *)&gdt0[GDT_KTSS], &ktss0,
441 	    sizeof (ktss0) - 1, SDT_SYSTSS, SEL_KPL);
442 
443 	/*
444 	 * %gs selector for kernel
445 	 */
446 	set_usegd(&gdt0[GDT_GS], &cpus[0], sizeof (struct cpu) - 1, SDT_MEMRWA,
447 	    SEL_KPL, SDP_BYTES, SDP_OP32);
448 
449 	/*
450 	 * Initialize lwp private descriptors.
451 	 * Only the attributes and limits are initialized; the effective
452 	 * base address is programmed via fsbase/gsbase.
453 	 */
454 	set_usegd(&gdt0[GDT_LWPFS], NULL, (size_t)-1, SDT_MEMRWA, SEL_UPL,
455 	    SDP_PAGES, SDP_OP32);
456 	set_usegd(&gdt0[GDT_LWPGS], NULL, (size_t)-1, SDT_MEMRWA, SEL_UPL,
457 	    SDP_PAGES, SDP_OP32);
458 
459 	/*
460 	 * Install our new GDT
461 	 */
462 	r_gdt.dtr_limit = sizeof (gdt0) - 1;
463 	r_gdt.dtr_base = (uintptr_t)gdt0;
464 	wr_gdtr(&r_gdt);
465 
466 	/*
467 	 * Initialize convenient zero base user descriptors for clearing
468 	 * lwp private %fs and %gs descriptors in GDT. See setregs() for
469 	 * an example.
470 	 */
471 	set_usegd(&zero_udesc, 0, -1, SDT_MEMRWA, SEL_UPL, SDP_PAGES, SDP_OP32);
472 }
473 
474 #endif	/* __i386 */
475 
476 #if defined(__amd64)
477 
478 /*
479  * Build kernel IDT.
480  *
481  * Note that we pretty much require every gate to be an interrupt gate;
482  * that's because of our dependency on using 'swapgs' every time we come
483  * into the kernel to find the cpu structure - if we get interrupted just
484  * before doing that, so that %cs is in kernel mode (so that the trap prolog
485  * doesn't do a swapgs), but %gsbase is really still pointing at something
486  * in userland, bad things ensue.
487  *
488  * Perhaps they should have invented a trap gate that does an atomic swapgs?
489  *
490  * XX64	We do need to think further about the follow-on impact of this.
491  *	Most of the kernel handlers re-enable interrupts as soon as they've
492  *	saved register state and done the swapgs, but there may be something
493  *	more subtle going on.
494  */
495 static void
496 init_idt(void)
497 {
498 	char	ivctname[80];
499 	void	(*ivctptr)(void);
500 	int	i;
501 
502 	/*
503 	 * Initialize entire table with 'reserved' trap and then overwrite
504 	 * specific entries. T_EXTOVRFLT (9) is unsupported and reserved
505 	 * since it can only be generated on a 386 processor. 15 is also
506 	 * unsupported and reserved.
507 	 */
508 	for (i = 0; i < NIDT; i++)
509 		set_gatesegd(&idt0[i], &resvtrap, KCS_SEL, 0, SDT_SYSIGT,
510 		    SEL_KPL);
511 
512 	set_gatesegd(&idt0[T_ZERODIV], &div0trap, KCS_SEL, 0, SDT_SYSIGT,
513 	    SEL_KPL);
514 	set_gatesegd(&idt0[T_SGLSTP], &dbgtrap, KCS_SEL, 0, SDT_SYSIGT,
515 	    SEL_KPL);
516 	set_gatesegd(&idt0[T_NMIFLT], &nmiint, KCS_SEL, 0, SDT_SYSIGT,
517 	    SEL_KPL);
518 	set_gatesegd(&idt0[T_BPTFLT], &brktrap, KCS_SEL, 0, SDT_SYSIGT,
519 	    SEL_UPL);
520 	set_gatesegd(&idt0[T_OVFLW], &ovflotrap, KCS_SEL, 0, SDT_SYSIGT,
521 	    SEL_UPL);
522 	set_gatesegd(&idt0[T_BOUNDFLT], &boundstrap, KCS_SEL, 0, SDT_SYSIGT,
523 	    SEL_KPL);
524 	set_gatesegd(&idt0[T_ILLINST], &invoptrap, KCS_SEL, 0, SDT_SYSIGT,
525 	    SEL_KPL);
526 	set_gatesegd(&idt0[T_NOEXTFLT], &ndptrap,  KCS_SEL, 0, SDT_SYSIGT,
527 	    SEL_KPL);
528 
529 	/*
530 	 * double fault handler.
531 	 */
532 	set_gatesegd(&idt0[T_DBLFLT], &syserrtrap, KCS_SEL, 1, SDT_SYSIGT,
533 	    SEL_KPL);
534 
535 	/*
536 	 * T_EXTOVRFLT coprocessor-segment-overrun not supported.
537 	 */
538 
539 	set_gatesegd(&idt0[T_TSSFLT], &invtsstrap, KCS_SEL, 0, SDT_SYSIGT,
540 	    SEL_KPL);
541 	set_gatesegd(&idt0[T_SEGFLT], &segnptrap, KCS_SEL, 0, SDT_SYSIGT,
542 	    SEL_KPL);
543 	set_gatesegd(&idt0[T_STKFLT], &stktrap, KCS_SEL, 0, SDT_SYSIGT,
544 	    SEL_KPL);
545 	set_gatesegd(&idt0[T_GPFLT], &gptrap, KCS_SEL, 0, SDT_SYSIGT,
546 	    SEL_KPL);
547 	set_gatesegd(&idt0[T_PGFLT], &pftrap, KCS_SEL, 0, SDT_SYSIGT,
548 	    SEL_KPL);
549 
550 	/*
551 	 * 15 reserved.
552 	 */
553 	set_gatesegd(&idt0[15], &resvtrap, KCS_SEL, 0, SDT_SYSIGT, SEL_KPL);
554 
555 	set_gatesegd(&idt0[T_EXTERRFLT], &ndperr, KCS_SEL, 0, SDT_SYSIGT,
556 	    SEL_KPL);
557 	set_gatesegd(&idt0[T_ALIGNMENT], &achktrap, KCS_SEL, 0, SDT_SYSIGT,
558 	    SEL_KPL);
559 	set_gatesegd(&idt0[T_MCE], &mcetrap, KCS_SEL, 0, SDT_SYSIGT,
560 	    SEL_KPL);
561 	set_gatesegd(&idt0[T_SIMDFPE], &xmtrap, KCS_SEL, 0, SDT_SYSIGT,
562 	    SEL_KPL);
563 
564 	/*
565 	 * 20-31 reserved
566 	 */
567 	for (i = 20; i < 32; i++)
568 		set_gatesegd(&idt0[i], &invaltrap, KCS_SEL, 0, SDT_SYSIGT,
569 		    SEL_KPL);
570 
571 	/*
572 	 * interrupts 32 - 255
573 	 */
574 	for (i = 32; i < 256; i++) {
575 		(void) snprintf(ivctname, sizeof (ivctname), "ivct%d", i);
576 		ivctptr = (void (*)(void))kobj_getsymvalue(ivctname, 0);
577 		if (ivctptr == NULL)
578 			panic("kobj_getsymvalue(%s) failed", ivctname);
579 
580 		set_gatesegd(&idt0[i], ivctptr, KCS_SEL, 0, SDT_SYSIGT,
581 		    SEL_KPL);
582 	}
583 
584 	/*
585 	 * install fast trap handler at 210.
586 	 */
587 	set_gatesegd(&idt0[T_FASTTRAP], &fasttrap, KCS_SEL, 0,
588 	    SDT_SYSIGT, SEL_UPL);
589 
590 	/*
591 	 * System call handler.
592 	 */
593 	set_gatesegd(&idt0[T_SYSCALLINT], &sys_syscall_int, KCS_SEL, 0,
594 	    SDT_SYSIGT, SEL_UPL);
595 
596 	/*
597 	 * Install the DTrace interrupt handlers for the fasttrap provider.
598 	 */
599 	set_gatesegd(&idt0[T_DTRACE_PROBE], &dtrace_fasttrap, KCS_SEL, 0,
600 	    SDT_SYSIGT, SEL_UPL);
601 	set_gatesegd(&idt0[T_DTRACE_RET], &dtrace_ret, KCS_SEL, 0,
602 	    SDT_SYSIGT, SEL_UPL);
603 
604 	if (boothowto & RB_DEBUG)
605 		kdi_dvec_idt_sync(idt0);
606 
607 	/*
608 	 * We must maintain a description of idt0 in convenient IDTR format
609 	 * for use by T_NMIFLT and T_PGFLT (nmiint() and pentium_pftrap())
610 	 * handlers.
611 	 */
612 	idt0_default_r.dtr_limit = sizeof (idt0) - 1;
613 	idt0_default_r.dtr_base = (uintptr_t)idt0;
614 	wr_idtr(&idt0_default_r);
615 }
616 
617 #elif defined(__i386)
618 
619 /*
620  * Build kernel IDT.
621  */
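/*
 * A note on the gate types used below: SDT_SYSIGT entries are interrupt
 * gates, so the processor clears IF on entry; SDT_SYSTGT entries are trap
 * gates, which leave IF unchanged. Interrupt gates are used where the
 * handler must capture volatile state before anything else can intervene,
 * e.g. pftrap() needs to read %cr2 before another fault could overwrite
 * it, and all external interrupt vectors start with interrupts disabled.
 * The double fault is routed through a task gate (SDT_SYSTASKGT), which
 * task-switches to dftss0 so the handler starts on a known-good stack.
 */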
622 static void
623 init_idt(void)
624 {
625 	char	ivctname[80];
626 	void	(*ivctptr)(void);
627 	int	i;
628 
629 	/*
630 	 * Initialize entire table with 'reserved' trap and then overwrite
631 	 * specific entries. T_EXTOVRFLT (9) is unsupported and reserved
632 	 * since it can only be generated on a 386 processor. 15 is also
633 	 * unsupported and reserved.
634 	 */
635 	for (i = 0; i < NIDT; i++)
636 		set_gatesegd(&idt0[i], &resvtrap, KCS_SEL, 0, SDT_SYSTGT,
637 		    SEL_KPL);
638 
639 	set_gatesegd(&idt0[T_ZERODIV], &div0trap, KCS_SEL, 0, SDT_SYSTGT,
640 	    SEL_KPL);
641 	set_gatesegd(&idt0[T_SGLSTP], &dbgtrap, KCS_SEL, 0, SDT_SYSIGT,
642 	    SEL_KPL);
643 	set_gatesegd(&idt0[T_NMIFLT], &nmiint, KCS_SEL, 0, SDT_SYSIGT,
644 	    SEL_KPL);
645 	set_gatesegd(&idt0[T_BPTFLT], &brktrap, KCS_SEL, 0, SDT_SYSTGT,
646 	    SEL_UPL);
647 	set_gatesegd(&idt0[T_OVFLW], &ovflotrap, KCS_SEL, 0, SDT_SYSTGT,
648 	    SEL_UPL);
649 	set_gatesegd(&idt0[T_BOUNDFLT], &boundstrap, KCS_SEL, 0, SDT_SYSTGT,
650 	    SEL_KPL);
651 	set_gatesegd(&idt0[T_ILLINST], &invoptrap, KCS_SEL, 0, SDT_SYSIGT,
652 	    SEL_KPL);
653 	set_gatesegd(&idt0[T_NOEXTFLT], &ndptrap,  KCS_SEL, 0, SDT_SYSIGT,
654 	    SEL_KPL);
655 
656 	/*
657 	 * Install TSS for T_DBLFLT handler.
658 	 */
659 	set_gatesegd(&idt0[T_DBLFLT], NULL, DFTSS_SEL, 0, SDT_SYSTASKGT,
660 	    SEL_KPL);
661 
662 	/*
663 	 * T_EXTOVRFLT coprocessor-segment-overrun not supported.
664 	 */
665 
666 	set_gatesegd(&idt0[T_TSSFLT], &invtsstrap, KCS_SEL, 0, SDT_SYSTGT,
667 	    SEL_KPL);
668 	set_gatesegd(&idt0[T_SEGFLT], &segnptrap, KCS_SEL, 0, SDT_SYSTGT,
669 	    SEL_KPL);
670 	set_gatesegd(&idt0[T_STKFLT], &stktrap, KCS_SEL, 0, SDT_SYSTGT,
671 	    SEL_KPL);
672 	set_gatesegd(&idt0[T_GPFLT], &gptrap, KCS_SEL, 0, SDT_SYSTGT,
673 	    SEL_KPL);
674 	set_gatesegd(&idt0[T_PGFLT], &pftrap, KCS_SEL, 0, SDT_SYSIGT,
675 	    SEL_KPL);
676 
677 	/*
678 	 * 15 reserved.
679 	 */
680 	set_gatesegd(&idt0[15], &resvtrap, KCS_SEL, 0, SDT_SYSTGT, SEL_KPL);
681 
682 	set_gatesegd(&idt0[T_EXTERRFLT], &ndperr, KCS_SEL, 0, SDT_SYSIGT,
683 	    SEL_KPL);
684 	set_gatesegd(&idt0[T_ALIGNMENT], &achktrap, KCS_SEL, 0, SDT_SYSTGT,
685 	    SEL_KPL);
686 	set_gatesegd(&idt0[T_MCE], &mcetrap, KCS_SEL, 0, SDT_SYSIGT,
687 	    SEL_KPL);
688 	set_gatesegd(&idt0[T_SIMDFPE], &xmtrap, KCS_SEL, 0, SDT_SYSTGT,
689 	    SEL_KPL);
690 
691 	/*
692 	 * 20-31 reserved
693 	 */
694 	for (i = 20; i < 32; i++)
695 		set_gatesegd(&idt0[i], &invaltrap, KCS_SEL, 0, SDT_SYSTGT,
696 		    SEL_KPL);
697 
698 	/*
699 	 * interrupts 32 - 255
700 	 */
701 	for (i = 32; i < 256; i++) {
702 		(void) snprintf(ivctname, sizeof (ivctname), "ivct%d", i);
703 		ivctptr = (void (*)(void))kobj_getsymvalue(ivctname, 0);
704 		if (ivctptr == NULL)
705 			panic("kobj_getsymvalue(%s) failed", ivctname);
706 
707 		set_gatesegd(&idt0[i], ivctptr, KCS_SEL, 0, SDT_SYSIGT,
708 		    SEL_KPL);
709 	}
710 
711 	/*
712 	 * install fast trap handler at 210.
713 	 */
714 	set_gatesegd(&idt0[T_FASTTRAP], &fasttrap, KCS_SEL, 0,
715 	    SDT_SYSIGT, SEL_UPL);
716 
717 	/*
718 	 * System call handler. Note that we don't use the hardware's parameter
719 	 * copying mechanism here; see the comment above sys_call() for details.
720 	 */
721 	set_gatesegd(&idt0[T_SYSCALLINT], &sys_call, KCS_SEL, 0,
722 	    SDT_SYSIGT, SEL_UPL);
723 
724 	/*
725 	 * Install the DTrace interrupt handlers for the fasttrap provider.
726 	 */
727 	set_gatesegd(&idt0[T_DTRACE_PROBE], &dtrace_fasttrap, KCS_SEL, 0,
728 	    SDT_SYSIGT, SEL_UPL);
729 	set_gatesegd(&idt0[T_DTRACE_RET], &dtrace_ret, KCS_SEL, 0,
730 	    SDT_SYSIGT, SEL_UPL);
731 
732 	if (boothowto & RB_DEBUG)
733 		kdi_dvec_idt_sync(idt0);
734 
735 	/*
736 	 * We must maintain a description of idt0 in convenient IDTR format
737 	 * for use by T_NMIFLT and T_PGFLT (nmiint() and pentium_pftrap())
738 	 * handlers.
739 	 */
740 	idt0_default_r.dtr_limit = sizeof (idt0) - 1;
741 	idt0_default_r.dtr_base = (uintptr_t)idt0;
742 	wr_idtr(&idt0_default_r);
743 }
744 
745 #endif	/* __i386 */
746 
747 /*
748  * The kernel does not deal with LDTs unless a user explicitly creates
749  * one. Under normal circumstances, the LDTR contains 0. Any process attempting
750  * to reference the LDT will therefore cause a #gp. System calls made via the
751  * obsolete lcall mechanism are emulated by the #gp fault handler.
752  */
753 static void
754 init_ldt(void)
755 {
756 	wr_ldtr(0);
757 }
758 
759 #if defined(__amd64)
760 
761 static void
762 init_tss(void)
763 {
764 	/*
765 	 * tss_rsp0 is dynamically filled in by resume() on each context switch.
766 	 * All exceptions but #DF will run on the thread stack.
767 	 * Set up the double fault stack here.
768 	 */
769 	ktss0.tss_ist1 =
770 	    (uint64_t)&dblfault_stack0[sizeof (dblfault_stack0)];
771 
772 	/*
773 	 * Set the I/O bit map offset to the size of the TSS, i.e. just past
774 	 * the TSS segment limit, so that there is no I/O permission map and
775 	 * all user-mode I/O instructions generate a #gp fault.
776 	 */
777 	ktss0.tss_bitmapbase = sizeof (ktss0);
778 
779 	/*
780 	 * Point %tr to descriptor for ktss0 in gdt.
781 	 */
782 	wr_tsr(KTSS_SEL);
783 }
784 
785 #elif defined(__i386)
786 
787 static void
788 init_tss(void)
789 {
790 	/*
791 	 * ktss0.tss_esp is dynamically filled in by resume() on each
792 	 * context switch.
793 	 */
794 	ktss0.tss_ss0	= KDS_SEL;
795 	ktss0.tss_eip	= (uint32_t)_start;
796 	ktss0.tss_ds	= ktss0.tss_es = ktss0.tss_ss = KDS_SEL;
797 	ktss0.tss_cs	= KCS_SEL;
798 	ktss0.tss_fs	= KFS_SEL;
799 	ktss0.tss_gs	= KGS_SEL;
800 	ktss0.tss_ldt	= ULDT_SEL;
801 
802 	/*
803 	 * Initialize double fault tss.
804 	 */
805 	dftss0.tss_esp0	= (uint32_t)&dblfault_stack0[sizeof (dblfault_stack0)];
806 	dftss0.tss_ss0	= KDS_SEL;
807 
808 	/*
809 	 * tss_cr3 will get initialized in hat_kern_setup() once our page
810 	 * tables have been set up.
811 	 */
812 	dftss0.tss_eip	= (uint32_t)syserrtrap;
813 	dftss0.tss_esp	= (uint32_t)&dblfault_stack0[sizeof (dblfault_stack0)];
814 	dftss0.tss_cs	= KCS_SEL;
815 	dftss0.tss_ds	= KDS_SEL;
816 	dftss0.tss_es	= KDS_SEL;
817 	dftss0.tss_ss	= KDS_SEL;
818 	dftss0.tss_fs	= KFS_SEL;
819 	dftss0.tss_gs	= KGS_SEL;
820 
821 	/*
822 	 * Set the I/O bit map offset to the size of the TSS, i.e. just past
823 	 * the TSS segment limit, so that there is no I/O permission map and
824 	 * all user-mode I/O instructions generate a #gp fault.
825 	 */
826 	ktss0.tss_bitmapbase = sizeof (ktss0);
827 
828 	/*
829 	 * Point %tr to descriptor for ktss0 in gdt.
830 	 */
831 	wr_tsr(KTSS_SEL);
832 }
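/*
 * The dftss0 state initialized above is consumed by the T_DBLFLT task gate
 * installed in init_idt(): on a double fault the processor performs a
 * hardware task switch to dftss0, loading %eip, the stack pointer and the
 * segment registers from it, so syserrtrap starts on dblfault_stack0
 * regardless of how badly the faulting context was damaged.
 */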
833 
834 #endif	/* __i386 */
835 
836 void
837 init_tables(void)
838 {
839 	init_gdt();
840 	init_tss();
841 	init_idt();
842 	init_ldt();
843 }
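/*
 * Ordering note: init_gdt() must run before init_tss(), because
 * wr_tsr(KTSS_SEL) loads %tr from the TSS descriptor that init_gdt()
 * installs at gdt0[GDT_KTSS]. Similarly, the i386 double fault task gate
 * installed by init_idt() refers to the TSS descriptor at gdt0[GDT_DBFLT].
 */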
844