/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/types.h>
#include <sys/tss.h>
#include <sys/segments.h>
#include <sys/trap.h>
#include <sys/cpuvar.h>
#include <sys/x86_archext.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/kobj.h>
#include <sys/cmn_err.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/systm.h>

extern void syscall_int(void);

/*
 * cpu0 and default tables and structures.
65 */ 66 desctbr_t gdt0_default_r; 67 68 #pragma align 16(idt0) 69 gate_desc_t idt0[NIDT]; /* interrupt descriptor table */ 70 desctbr_t idt0_default_r; /* describes idt0 in IDTR format */ 71 72 #pragma align 16(ktss0) 73 struct tss ktss0; /* kernel task state structure */ 74 75 #if defined(__i386) 76 #pragma align 16(dftss0) 77 struct tss dftss0; /* #DF double-fault exception */ 78 #endif /* __i386 */ 79 80 user_desc_t zero_udesc; /* base zero user desc native procs */ 81 system_desc_t zero_sdesc; 82 83 #if defined(__amd64) 84 user_desc_t zero_u32desc; /* 32-bit compatibility procs */ 85 #endif /* __amd64 */ 86 87 #pragma align 16(dblfault_stack0) 88 char dblfault_stack0[DEFAULTSTKSZ]; 89 90 extern void fast_null(void); 91 extern hrtime_t get_hrtime(void); 92 extern hrtime_t gethrvtime(void); 93 extern hrtime_t get_hrestime(void); 94 extern uint64_t getlgrp(void); 95 96 void (*(fasttable[]))(void) = { 97 fast_null, /* T_FNULL routine */ 98 fast_null, /* T_FGETFP routine (initially null) */ 99 fast_null, /* T_FSETFP routine (initially null) */ 100 (void (*)())get_hrtime, /* T_GETHRTIME */ 101 (void (*)())gethrvtime, /* T_GETHRVTIME */ 102 (void (*)())get_hrestime, /* T_GETHRESTIME */ 103 (void (*)())getlgrp /* T_GETLGRP */ 104 }; 105 106 /* 107 * software prototypes for default local descriptor table 108 */ 109 110 /* 111 * Routines for loading segment descriptors in format the hardware 112 * can understand. 113 */ 114 115 #if defined(__amd64) 116 117 /* 118 * In long mode we have the new L or long mode attribute bit 119 * for code segments. Only the conforming bit in type is used along 120 * with descriptor priority and present bits. Default operand size must 121 * be zero when in long mode. In 32-bit compatibility mode all fields 122 * are treated as in legacy mode. For data segments while in long mode 123 * only the present bit is loaded. 124 */ 125 void 126 set_usegd(user_desc_t *dp, uint_t lmode, void *base, size_t size, 127 uint_t type, uint_t dpl, uint_t gran, uint_t defopsz) 128 { 129 ASSERT(lmode == SDP_SHORT || lmode == SDP_LONG); 130 131 /* 132 * 64-bit long mode. 133 */ 134 if (lmode == SDP_LONG) 135 dp->usd_def32 = 0; /* 32-bit operands only */ 136 else 137 /* 138 * 32-bit compatibility mode. 139 */ 140 dp->usd_def32 = defopsz; /* 0 = 16, 1 = 32-bit ops */ 141 142 dp->usd_long = lmode; /* 64-bit mode */ 143 dp->usd_type = type; 144 dp->usd_dpl = dpl; 145 dp->usd_p = 1; 146 dp->usd_gran = gran; /* 0 = bytes, 1 = pages */ 147 148 dp->usd_lobase = (uintptr_t)base; 149 dp->usd_midbase = (uintptr_t)base >> 16; 150 dp->usd_hibase = (uintptr_t)base >> (16 + 8); 151 dp->usd_lolimit = size; 152 dp->usd_hilimit = (uintptr_t)size >> 16; 153 } 154 155 #elif defined(__i386) 156 157 /* 158 * Install user segment descriptor for code and data. 159 */ 160 void 161 set_usegd(user_desc_t *dp, void *base, size_t size, uint_t type, 162 uint_t dpl, uint_t gran, uint_t defopsz) 163 { 164 dp->usd_lolimit = size; 165 dp->usd_hilimit = (uintptr_t)size >> 16; 166 167 dp->usd_lobase = (uintptr_t)base; 168 dp->usd_midbase = (uintptr_t)base >> 16; 169 dp->usd_hibase = (uintptr_t)base >> (16 + 8); 170 171 dp->usd_type = type; 172 dp->usd_dpl = dpl; 173 dp->usd_p = 1; 174 dp->usd_def32 = defopsz; /* 0 = 16, 1 = 32 bit operands */ 175 dp->usd_gran = gran; /* 0 = bytes, 1 = pages */ 176 } 177 178 #endif /* __i386 */ 179 180 /* 181 * Install system segment descriptor for LDT and TSS segments. 
182 */ 183 184 #if defined(__amd64) 185 186 void 187 set_syssegd(system_desc_t *dp, void *base, size_t size, uint_t type, 188 uint_t dpl) 189 { 190 dp->ssd_lolimit = size; 191 dp->ssd_hilimit = (uintptr_t)size >> 16; 192 193 dp->ssd_lobase = (uintptr_t)base; 194 dp->ssd_midbase = (uintptr_t)base >> 16; 195 dp->ssd_hibase = (uintptr_t)base >> (16 + 8); 196 dp->ssd_hi64base = (uintptr_t)base >> (16 + 8 + 8); 197 198 dp->ssd_type = type; 199 dp->ssd_zero1 = 0; /* must be zero */ 200 dp->ssd_zero2 = 0; 201 dp->ssd_dpl = dpl; 202 dp->ssd_p = 1; 203 dp->ssd_gran = 0; /* force byte units */ 204 } 205 206 #elif defined(__i386) 207 208 void 209 set_syssegd(system_desc_t *dp, void *base, size_t size, uint_t type, 210 uint_t dpl) 211 { 212 dp->ssd_lolimit = size; 213 dp->ssd_hilimit = (uintptr_t)size >> 16; 214 215 dp->ssd_lobase = (uintptr_t)base; 216 dp->ssd_midbase = (uintptr_t)base >> 16; 217 dp->ssd_hibase = (uintptr_t)base >> (16 + 8); 218 219 dp->ssd_type = type; 220 dp->ssd_zero = 0; /* must be zero */ 221 dp->ssd_dpl = dpl; 222 dp->ssd_p = 1; 223 dp->ssd_gran = 0; /* force byte units */ 224 } 225 226 #endif /* __i386 */ 227 228 /* 229 * Install gate segment descriptor for interrupt, trap, call and task gates. 230 */ 231 232 #if defined(__amd64) 233 234 /* 235 * Note stkcpy is replaced with ist. Read the PRM for details on this. 236 */ 237 void 238 set_gatesegd(gate_desc_t *dp, void (*func)(void), selector_t sel, uint_t ist, 239 uint_t type, uint_t dpl) 240 { 241 dp->sgd_looffset = (uintptr_t)func; 242 dp->sgd_hioffset = (uintptr_t)func >> 16; 243 dp->sgd_hi64offset = (uintptr_t)func >> (16 + 16); 244 245 dp->sgd_selector = (uint16_t)sel; 246 dp->sgd_ist = ist; 247 dp->sgd_type = type; 248 dp->sgd_dpl = dpl; 249 dp->sgd_p = 1; 250 } 251 252 #elif defined(__i386) 253 254 void 255 set_gatesegd(gate_desc_t *dp, void (*func)(void), selector_t sel, 256 uint_t wcount, uint_t type, uint_t dpl) 257 { 258 dp->sgd_looffset = (uintptr_t)func; 259 dp->sgd_hioffset = (uintptr_t)func >> 16; 260 261 dp->sgd_selector = (uint16_t)sel; 262 dp->sgd_stkcpy = wcount; 263 dp->sgd_type = type; 264 dp->sgd_dpl = dpl; 265 dp->sgd_p = 1; 266 } 267 268 #endif /* __i386 */ 269 270 /* 271 * Build kernel GDT. 272 */ 273 274 #if defined(__amd64) 275 276 static void 277 init_gdt(void) 278 { 279 desctbr_t r_bgdt, r_gdt; 280 user_desc_t *bgdt; 281 size_t alen = 0xfffff; /* entire 32-bit address space */ 282 283 /* 284 * Copy in from boot's gdt to our gdt entries 1 - 4. 285 * Entry 0 is the null descriptor by definition. 286 */ 287 rd_gdtr(&r_bgdt); 288 bgdt = (user_desc_t *)r_bgdt.dtr_base; 289 if (bgdt == NULL) 290 panic("null boot gdt"); 291 292 gdt0[GDT_B32DATA] = bgdt[GDT_B32DATA]; 293 gdt0[GDT_B32CODE] = bgdt[GDT_B32CODE]; 294 gdt0[GDT_B64DATA] = bgdt[GDT_B64DATA]; 295 gdt0[GDT_B64CODE] = bgdt[GDT_B64CODE]; 296 297 /* 298 * 64-bit kernel code segment. 299 */ 300 set_usegd(&gdt0[GDT_KCODE], SDP_LONG, NULL, 0, SDT_MEMERA, SEL_KPL, 301 SDP_PAGES, SDP_OP32); 302 303 /* 304 * 64-bit kernel data segment. The limit attribute is ignored in 64-bit 305 * mode, but we set it here to 0xFFFF so that we can use the SYSRET 306 * instruction to return from system calls back to 32-bit applications. 307 * SYSRET doesn't update the base, limit, or attributes of %ss or %ds 308 * descriptors. We therefore must ensure that the kernel uses something, 309 * though it will be ignored by hardware, that is compatible with 32-bit 310 * apps. For the same reason we must set the default op size of this 311 * descriptor to 32-bit operands. 
312 */ 313 set_usegd(&gdt0[GDT_KDATA], SDP_LONG, NULL, alen, SDT_MEMRWA, 314 SEL_KPL, SDP_PAGES, SDP_OP32); 315 gdt0[GDT_KDATA].usd_def32 = 1; 316 317 /* 318 * 64-bit user code segment. 319 */ 320 set_usegd(&gdt0[GDT_UCODE], SDP_LONG, NULL, 0, SDT_MEMERA, SEL_UPL, 321 SDP_PAGES, SDP_OP32); 322 323 /* 324 * 32-bit user code segment. 325 */ 326 set_usegd(&gdt0[GDT_U32CODE], SDP_SHORT, NULL, alen, SDT_MEMERA, 327 SEL_UPL, SDP_PAGES, SDP_OP32); 328 329 /* 330 * 32 and 64 bit data segments can actually share the same descriptor. 331 * In long mode only the present bit is checked but all other fields 332 * are loaded. But in compatibility mode all fields are interpreted 333 * as in legacy mode so they must be set correctly for a 32-bit data 334 * segment. 335 */ 336 set_usegd(&gdt0[GDT_UDATA], SDP_SHORT, NULL, alen, SDT_MEMRWA, SEL_UPL, 337 SDP_PAGES, SDP_OP32); 338 339 /* 340 * The 64-bit kernel has no default LDT. By default, the LDT descriptor 341 * in the GDT is 0. 342 */ 343 344 /* 345 * Kernel TSS 346 */ 347 set_syssegd((system_desc_t *)&gdt0[GDT_KTSS], &ktss0, 348 sizeof (ktss0) - 1, SDT_SYSTSS, SEL_KPL); 349 350 /* 351 * Initialize fs and gs descriptors for 32 bit processes. 352 * Only attributes and limits are initialized, the effective 353 * base address is programmed via fsbase/gsbase. 354 */ 355 set_usegd(&gdt0[GDT_LWPFS], SDP_SHORT, NULL, alen, SDT_MEMRWA, 356 SEL_UPL, SDP_PAGES, SDP_OP32); 357 set_usegd(&gdt0[GDT_LWPGS], SDP_SHORT, NULL, alen, SDT_MEMRWA, 358 SEL_UPL, SDP_PAGES, SDP_OP32); 359 360 /* 361 * Install our new GDT 362 */ 363 r_gdt.dtr_limit = sizeof (gdt0) - 1; 364 r_gdt.dtr_base = (uintptr_t)gdt0; 365 wr_gdtr(&r_gdt); 366 367 /* 368 * Initialize convenient zero base user descriptors for clearing 369 * lwp private %fs and %gs descriptors in GDT. See setregs() for 370 * an example. 371 */ 372 set_usegd(&zero_udesc, SDP_LONG, 0, 0, SDT_MEMRWA, SEL_UPL, 373 SDP_BYTES, SDP_OP32); 374 set_usegd(&zero_u32desc, SDP_SHORT, 0, -1, SDT_MEMRWA, SEL_UPL, 375 SDP_PAGES, SDP_OP32); 376 } 377 378 #elif defined(__i386) 379 380 static void 381 init_gdt(void) 382 { 383 desctbr_t r_bgdt, r_gdt; 384 user_desc_t *bgdt; 385 386 /* 387 * Copy in from boot's gdt to our gdt entries 1 - 4. 388 * Entry 0 is null descriptor by definition. 389 */ 390 rd_gdtr(&r_bgdt); 391 bgdt = (user_desc_t *)r_bgdt.dtr_base; 392 if (bgdt == NULL) 393 panic("null boot gdt"); 394 395 gdt0[GDT_BOOTFLAT] = bgdt[GDT_BOOTFLAT]; 396 gdt0[GDT_BOOTCODE] = bgdt[GDT_BOOTCODE]; 397 gdt0[GDT_BOOTCODE16] = bgdt[GDT_BOOTCODE16]; 398 gdt0[GDT_BOOTDATA] = bgdt[GDT_BOOTDATA]; 399 400 /* 401 * Text and data for both kernel and user span entire 32 bit 402 * address space. 403 */ 404 405 /* 406 * kernel code segment. 407 */ 408 set_usegd(&gdt0[GDT_KCODE], NULL, -1, SDT_MEMERA, SEL_KPL, SDP_PAGES, 409 SDP_OP32); 410 411 /* 412 * kernel data segment. 413 */ 414 set_usegd(&gdt0[GDT_KDATA], NULL, -1, SDT_MEMRWA, SEL_KPL, SDP_PAGES, 415 SDP_OP32); 416 417 /* 418 * user code segment. 419 */ 420 set_usegd(&gdt0[GDT_UCODE], NULL, -1, SDT_MEMERA, SEL_UPL, SDP_PAGES, 421 SDP_OP32); 422 423 /* 424 * user data segment. 
425 */ 426 set_usegd(&gdt0[GDT_UDATA], NULL, -1, SDT_MEMRWA, SEL_UPL, SDP_PAGES, 427 SDP_OP32); 428 429 /* 430 * TSS for T_DBLFLT (double fault) handler 431 */ 432 set_syssegd((system_desc_t *)&gdt0[GDT_DBFLT], &dftss0, 433 sizeof (dftss0) - 1, SDT_SYSTSS, SEL_KPL); 434 435 /* 436 * TSS for kernel 437 */ 438 set_syssegd((system_desc_t *)&gdt0[GDT_KTSS], &ktss0, 439 sizeof (ktss0) - 1, SDT_SYSTSS, SEL_KPL); 440 441 /* 442 * %gs selector for kernel 443 */ 444 set_usegd(&gdt0[GDT_GS], &cpus[0], sizeof (struct cpu) -1, SDT_MEMRWA, 445 SEL_KPL, SDP_BYTES, SDP_OP32); 446 447 /* 448 * Initialize lwp private descriptors. 449 * Only attributes and limits are initialized, the effective 450 * base address is programmed via fsbase/gsbase. 451 */ 452 set_usegd(&gdt0[GDT_LWPFS], NULL, (size_t)-1, SDT_MEMRWA, SEL_UPL, 453 SDP_PAGES, SDP_OP32); 454 set_usegd(&gdt0[GDT_LWPGS], NULL, (size_t)-1, SDT_MEMRWA, SEL_UPL, 455 SDP_PAGES, SDP_OP32); 456 457 /* 458 * Install our new GDT 459 */ 460 r_gdt.dtr_limit = sizeof (gdt0) - 1; 461 r_gdt.dtr_base = (uintptr_t)gdt0; 462 wr_gdtr(&r_gdt); 463 464 /* 465 * Initialize convenient zero base user descriptors for clearing 466 * lwp private %fs and %gs descriptors in GDT. See setregs() for 467 * an example. 468 */ 469 set_usegd(&zero_udesc, 0, -1, SDT_MEMRWA, SEL_UPL, SDP_PAGES, SDP_OP32); 470 } 471 472 #endif /* __i386 */ 473 474 #if defined(__amd64) 475 476 /* 477 * Build kernel IDT. 478 * 479 * Note that we pretty much require every gate to be an interrupt gate; 480 * that's because of our dependency on using 'swapgs' every time we come 481 * into the kernel to find the cpu structure - if we get interrupted just 482 * before doing that, so that %cs is in kernel mode (so that the trap prolog 483 * doesn't do a swapgs), but %gsbase is really still pointing at something 484 * in userland, bad things ensue. 485 * 486 * Perhaps they should have invented a trap gate that does an atomic swapgs? 487 * 488 * XX64 We do need to think further about the follow-on impact of this. 489 * Most of the kernel handlers re-enable interrupts as soon as they've 490 * saved register state and done the swapgs, but there may be something 491 * more subtle going on. 492 */ 493 static void 494 init_idt(void) 495 { 496 char ivctname[80]; 497 void (*ivctptr)(void); 498 int i; 499 500 /* 501 * Initialize entire table with 'reserved' trap and then overwrite 502 * specific entries. T_EXTOVRFLT (9) is unsupported and reserved 503 * since it can only be generated on a 386 processor. 15 is also 504 * unsupported and reserved. 505 */ 506 for (i = 0; i < NIDT; i++) 507 set_gatesegd(&idt0[i], &resvtrap, KCS_SEL, 0, SDT_SYSIGT, 508 SEL_KPL); 509 510 set_gatesegd(&idt0[T_ZERODIV], &div0trap, KCS_SEL, 0, SDT_SYSIGT, 511 SEL_KPL); 512 set_gatesegd(&idt0[T_SGLSTP], &dbgtrap, KCS_SEL, 0, SDT_SYSIGT, 513 SEL_KPL); 514 set_gatesegd(&idt0[T_NMIFLT], &nmiint, KCS_SEL, 0, SDT_SYSIGT, 515 SEL_KPL); 516 set_gatesegd(&idt0[T_BPTFLT], &brktrap, KCS_SEL, 0, SDT_SYSIGT, 517 SEL_UPL); 518 set_gatesegd(&idt0[T_OVFLW], &ovflotrap, KCS_SEL, 0, SDT_SYSIGT, 519 SEL_UPL); 520 set_gatesegd(&idt0[T_BOUNDFLT], &boundstrap, KCS_SEL, 0, SDT_SYSIGT, 521 SEL_KPL); 522 set_gatesegd(&idt0[T_ILLINST], &invoptrap, KCS_SEL, 0, SDT_SYSIGT, 523 SEL_KPL); 524 set_gatesegd(&idt0[T_NOEXTFLT], &ndptrap, KCS_SEL, 0, SDT_SYSIGT, 525 SEL_KPL); 526 527 /* 528 * double fault handler. 529 */ 530 set_gatesegd(&idt0[T_DBLFLT], &syserrtrap, KCS_SEL, 1, SDT_SYSIGT, 531 SEL_KPL); 532 533 /* 534 * T_EXTOVRFLT coprocessor-segment-overrun not supported. 
535 */ 536 537 set_gatesegd(&idt0[T_TSSFLT], &invtsstrap, KCS_SEL, 0, SDT_SYSIGT, 538 SEL_KPL); 539 set_gatesegd(&idt0[T_SEGFLT], &segnptrap, KCS_SEL, 0, SDT_SYSIGT, 540 SEL_KPL); 541 set_gatesegd(&idt0[T_STKFLT], &stktrap, KCS_SEL, 0, SDT_SYSIGT, 542 SEL_KPL); 543 set_gatesegd(&idt0[T_GPFLT], &gptrap, KCS_SEL, 0, SDT_SYSIGT, 544 SEL_KPL); 545 set_gatesegd(&idt0[T_PGFLT], &pftrap, KCS_SEL, 0, SDT_SYSIGT, 546 SEL_KPL); 547 548 /* 549 * 15 reserved. 550 */ 551 set_gatesegd(&idt0[15], &resvtrap, KCS_SEL, 0, SDT_SYSIGT, SEL_KPL); 552 553 set_gatesegd(&idt0[T_EXTERRFLT], &ndperr, KCS_SEL, 0, SDT_SYSIGT, 554 SEL_KPL); 555 set_gatesegd(&idt0[T_ALIGNMENT], &achktrap, KCS_SEL, 0, SDT_SYSIGT, 556 SEL_KPL); 557 set_gatesegd(&idt0[T_MCE], &mcetrap, KCS_SEL, 0, SDT_SYSIGT, 558 SEL_KPL); 559 set_gatesegd(&idt0[T_SIMDFPE], &xmtrap, KCS_SEL, 0, SDT_SYSIGT, 560 SEL_KPL); 561 562 /* 563 * 20-31 reserved 564 */ 565 for (i = 20; i < 32; i++) 566 set_gatesegd(&idt0[i], &invaltrap, KCS_SEL, 0, SDT_SYSIGT, 567 SEL_KPL); 568 569 /* 570 * interrupts 32 - 255 571 */ 572 for (i = 32; i < 256; i++) { 573 (void) snprintf(ivctname, sizeof (ivctname), "ivct%d", i); 574 ivctptr = (void (*)(void))kobj_getsymvalue(ivctname, 0); 575 if (ivctptr == NULL) 576 panic("kobj_getsymvalue(%s) failed", ivctname); 577 578 set_gatesegd(&idt0[i], ivctptr, KCS_SEL, 0, SDT_SYSIGT, 579 SEL_KPL); 580 } 581 582 /* 583 * install fast trap handler at 210. 584 */ 585 set_gatesegd(&idt0[T_FASTTRAP], &fasttrap, KCS_SEL, 0, 586 SDT_SYSIGT, SEL_UPL); 587 588 /* 589 * System call handler. 590 */ 591 set_gatesegd(&idt0[T_SYSCALLINT], &sys_syscall_int, KCS_SEL, 0, 592 SDT_SYSIGT, SEL_UPL); 593 594 /* 595 * Install the DTrace interrupt handler for the pid provider. 596 */ 597 set_gatesegd(&idt0[T_DTRACE_RET], &dtrace_ret, KCS_SEL, 0, 598 SDT_SYSIGT, SEL_UPL); 599 600 if (boothowto & RB_DEBUG) 601 kdi_dvec_idt_sync(idt0); 602 603 /* 604 * We must maintain a description of idt0 in convenient IDTR format 605 * for use by T_NMIFLT and T_PGFLT (nmiint() and pentium_pftrap()) 606 * handlers. 607 */ 608 idt0_default_r.dtr_limit = sizeof (idt0) - 1; 609 idt0_default_r.dtr_base = (uintptr_t)idt0; 610 wr_idtr(&idt0_default_r); 611 } 612 613 #elif defined(__i386) 614 615 /* 616 * Build kernel IDT. 617 */ 618 static void 619 init_idt(void) 620 { 621 char ivctname[80]; 622 void (*ivctptr)(void); 623 int i; 624 625 /* 626 * Initialize entire table with 'reserved' trap and then overwrite 627 * specific entries. T_EXTOVRFLT (9) is unsupported and reserved 628 * since it can only be generated on a 386 processor. 15 is also 629 * unsupported and reserved. 630 */ 631 for (i = 0; i < NIDT; i++) 632 set_gatesegd(&idt0[i], &resvtrap, KCS_SEL, 0, SDT_SYSTGT, 633 SEL_KPL); 634 635 set_gatesegd(&idt0[T_ZERODIV], &div0trap, KCS_SEL, 0, SDT_SYSTGT, 636 SEL_KPL); 637 set_gatesegd(&idt0[T_SGLSTP], &dbgtrap, KCS_SEL, 0, SDT_SYSIGT, 638 SEL_KPL); 639 set_gatesegd(&idt0[T_NMIFLT], &nmiint, KCS_SEL, 0, SDT_SYSIGT, 640 SEL_KPL); 641 set_gatesegd(&idt0[T_BPTFLT], &brktrap, KCS_SEL, 0, SDT_SYSTGT, 642 SEL_UPL); 643 set_gatesegd(&idt0[T_OVFLW], &ovflotrap, KCS_SEL, 0, SDT_SYSTGT, 644 SEL_UPL); 645 set_gatesegd(&idt0[T_BOUNDFLT], &boundstrap, KCS_SEL, 0, SDT_SYSTGT, 646 SEL_KPL); 647 set_gatesegd(&idt0[T_ILLINST], &invoptrap, KCS_SEL, 0, SDT_SYSIGT, 648 SEL_KPL); 649 set_gatesegd(&idt0[T_NOEXTFLT], &ndptrap, KCS_SEL, 0, SDT_SYSIGT, 650 SEL_KPL); 651 652 /* 653 * Install TSS for T_DBLFLT handler. 
654 */ 655 set_gatesegd(&idt0[T_DBLFLT], NULL, DFTSS_SEL, 0, SDT_SYSTASKGT, 656 SEL_KPL); 657 658 /* 659 * T_EXTOVRFLT coprocessor-segment-overrun not supported. 660 */ 661 662 set_gatesegd(&idt0[T_TSSFLT], &invtsstrap, KCS_SEL, 0, SDT_SYSTGT, 663 SEL_KPL); 664 set_gatesegd(&idt0[T_SEGFLT], &segnptrap, KCS_SEL, 0, SDT_SYSTGT, 665 SEL_KPL); 666 set_gatesegd(&idt0[T_STKFLT], &stktrap, KCS_SEL, 0, SDT_SYSTGT, 667 SEL_KPL); 668 set_gatesegd(&idt0[T_GPFLT], &gptrap, KCS_SEL, 0, SDT_SYSTGT, 669 SEL_KPL); 670 set_gatesegd(&idt0[T_PGFLT], &pftrap, KCS_SEL, 0, SDT_SYSIGT, 671 SEL_KPL); 672 673 /* 674 * 15 reserved. 675 */ 676 set_gatesegd(&idt0[15], &resvtrap, KCS_SEL, 0, SDT_SYSTGT, SEL_KPL); 677 678 set_gatesegd(&idt0[T_EXTERRFLT], &ndperr, KCS_SEL, 0, SDT_SYSIGT, 679 SEL_KPL); 680 set_gatesegd(&idt0[T_ALIGNMENT], &achktrap, KCS_SEL, 0, SDT_SYSTGT, 681 SEL_KPL); 682 set_gatesegd(&idt0[T_MCE], &mcetrap, KCS_SEL, 0, SDT_SYSIGT, 683 SEL_KPL); 684 set_gatesegd(&idt0[T_SIMDFPE], &xmtrap, KCS_SEL, 0, SDT_SYSTGT, 685 SEL_KPL); 686 687 /* 688 * 20-31 reserved 689 */ 690 for (i = 20; i < 32; i++) 691 set_gatesegd(&idt0[i], &invaltrap, KCS_SEL, 0, SDT_SYSTGT, 692 SEL_KPL); 693 694 /* 695 * interrupts 32 - 255 696 */ 697 for (i = 32; i < 256; i++) { 698 (void) snprintf(ivctname, sizeof (ivctname), "ivct%d", i); 699 ivctptr = (void (*)(void))kobj_getsymvalue(ivctname, 0); 700 if (ivctptr == NULL) 701 panic("kobj_getsymvalue(%s) failed", ivctname); 702 703 set_gatesegd(&idt0[i], ivctptr, KCS_SEL, 0, SDT_SYSIGT, 704 SEL_KPL); 705 } 706 707 /* 708 * install fast trap handler at 210. 709 */ 710 set_gatesegd(&idt0[T_FASTTRAP], &fasttrap, KCS_SEL, 0, 711 SDT_SYSIGT, SEL_UPL); 712 713 /* 714 * System call handler. Note that we don't use the hardware's parameter 715 * copying mechanism here; see the comment above sys_call() for details. 716 */ 717 set_gatesegd(&idt0[T_SYSCALLINT], &sys_call, KCS_SEL, 0, 718 SDT_SYSIGT, SEL_UPL); 719 720 /* 721 * Install the DTrace interrupt handler for the pid provider. 722 */ 723 set_gatesegd(&idt0[T_DTRACE_RET], &dtrace_ret, KCS_SEL, 0, 724 SDT_SYSIGT, SEL_UPL); 725 726 if (boothowto & RB_DEBUG) 727 kdi_dvec_idt_sync(idt0); 728 729 /* 730 * We must maintain a description of idt0 in convenient IDTR format 731 * for use by T_NMIFLT and T_PGFLT (nmiint() and pentium_pftrap()) 732 * handlers. 733 */ 734 idt0_default_r.dtr_limit = sizeof (idt0) - 1; 735 idt0_default_r.dtr_base = (uintptr_t)idt0; 736 wr_idtr(&idt0_default_r); 737 } 738 739 #endif /* __i386 */ 740 741 /* 742 * The kernel does not deal with LDTs unless a user explicitly creates 743 * one. Under normal circumstances, the LDTR contains 0. Any process attempting 744 * to reference the LDT will therefore cause a #gp. System calls made via the 745 * obsolete lcall mechanism are emulated by the #gp fault handler. 746 */ 747 static void 748 init_ldt(void) 749 { 750 wr_ldtr(0); 751 } 752 753 #if defined(__amd64) 754 755 static void 756 init_tss(void) 757 { 758 /* 759 * tss_rsp0 is dynamically filled in by resume() on each context switch. 760 * All exceptions but #DF will run on the thread stack. 761 * Set up the double fault stack here. 762 */ 763 ktss0.tss_ist1 = 764 (uint64_t)&dblfault_stack0[sizeof (dblfault_stack0)]; 765 766 /* 767 * Set I/O bit map offset equal to size of TSS segment limit 768 * for no I/O permission map. This will force all user I/O 769 * instructions to generate #gp fault. 770 */ 771 ktss0.tss_bitmapbase = sizeof (ktss0); 772 773 /* 774 * Point %tr to descriptor for ktss0 in gdt. 
775 */ 776 wr_tsr(KTSS_SEL); 777 } 778 779 #elif defined(__i386) 780 781 static void 782 init_tss(void) 783 { 784 /* 785 * ktss0.tss_esp dynamically filled in by resume() on each 786 * context switch. 787 */ 788 ktss0.tss_ss0 = KDS_SEL; 789 ktss0.tss_eip = (uint32_t)_start; 790 ktss0.tss_ds = ktss0.tss_es = ktss0.tss_ss = KDS_SEL; 791 ktss0.tss_cs = KCS_SEL; 792 ktss0.tss_fs = KFS_SEL; 793 ktss0.tss_gs = KGS_SEL; 794 ktss0.tss_ldt = ULDT_SEL; 795 796 /* 797 * Initialize double fault tss. 798 */ 799 dftss0.tss_esp0 = (uint32_t)&dblfault_stack0[sizeof (dblfault_stack0)]; 800 dftss0.tss_ss0 = KDS_SEL; 801 802 /* 803 * tss_cr3 will get initialized in hat_kern_setup() once our page 804 * tables have been setup. 805 */ 806 dftss0.tss_eip = (uint32_t)syserrtrap; 807 dftss0.tss_esp = (uint32_t)&dblfault_stack0[sizeof (dblfault_stack0)]; 808 dftss0.tss_cs = KCS_SEL; 809 dftss0.tss_ds = KDS_SEL; 810 dftss0.tss_es = KDS_SEL; 811 dftss0.tss_ss = KDS_SEL; 812 dftss0.tss_fs = KFS_SEL; 813 dftss0.tss_gs = KGS_SEL; 814 815 /* 816 * Set I/O bit map offset equal to size of TSS segment limit 817 * for no I/O permission map. This will force all user I/O 818 * instructions to generate #gp fault. 819 */ 820 ktss0.tss_bitmapbase = sizeof (ktss0); 821 822 /* 823 * Point %tr to descriptor for ktss0 in gdt. 824 */ 825 wr_tsr(KTSS_SEL); 826 } 827 828 #endif /* __i386 */ 829 830 void 831 init_tables(void) 832 { 833 init_gdt(); 834 init_tss(); 835 init_idt(); 836 init_ldt(); 837 } 838