1 /*- 2 * Copyright (c) 2009 Alex Keda <admin@lissyara.su> 3 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_x86bios.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <contrib/x86emu/x86emu.h>
#include <contrib/x86emu/x86emu_regs.h>
#include <compat/x86bios/x86bios.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/iodev.h>

#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * On amd64/i386 the real IVT/BDA and option ROMs can be mapped directly;
 * other architectures get a zero-filled fake IVT instead.
 */
#if defined(__amd64__) || defined(__i386__)
#define	X86BIOS_NATIVE_ARCH
#endif

#define	X86BIOS_PAGE_SIZE	0x00001000	/* 4K */

/* Sizes of the emulated real-mode address space regions. */
#define	X86BIOS_IVT_SIZE	0x00000500	/* 1K + 256 (BDA) */
#define	X86BIOS_SEG_SIZE	0x00010000	/* 64K */
#define	X86BIOS_MEM_SIZE	0x00100000	/* 1M */

/* Base physical addresses of those regions. */
#define	X86BIOS_IVT_BASE	0x00000000
#define	X86BIOS_RAM_BASE	0x00001000
#define	X86BIOS_ROM_BASE	0x000a0000

/*
 * ROM mapping covers everything from the ROM (or EBDA, if one was found
 * below X86BIOS_ROM_BASE) up to the 1M boundary; x86bios_rom_phys is set
 * by x86bios_map_mem() before this is ever evaluated.
 */
#define	X86BIOS_ROM_SIZE	(X86BIOS_MEM_SIZE - (uint32_t)x86bios_rom_phys)

#define	X86BIOS_PAGES		(X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)

/*
 * struct x86regs does not expose DS/SS directly; they live in the pad
 * fields.  These aliases let x86bios_init_regs() set them by name.
 */
#define	X86BIOS_R_DS	_pad1
#define	X86BIOS_R_SS	_pad2

/* The single, shared emulator instance; serialized by x86bios_lock. */
static struct x86emu x86bios_emu;

static struct mtx x86bios_lock;

/* Backing storage for the emulated 1M address space. */
static void *x86bios_ivt;	/* interrupt vector table + BDA */
static void *x86bios_rom;	/* shadow/option ROM (and EBDA, if any) */
static void *x86bios_seg;	/* scratch 64K stack/data segment */

/* Emulated-physical-page -> kernel VA translation table (X86BIOS_PAGES). */
static vm_offset_t *x86bios_map;

static vm_paddr_t x86bios_rom_phys;
static vm_paddr_t x86bios_seg_phys;

/* Details of the last emulated page fault, for diagnostic printing. */
static int x86bios_fault;
static uint32_t x86bios_fault_addr;
static uint16_t x86bios_fault_cs;
static uint16_t x86bios_fault_ip;

SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL, "x86bios debugging");
static int x86bios_trace_call;
TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
    "Trace far function calls");
static int x86bios_trace_int;
TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
    "Trace software interrupt handlers");

/*
 * Record the faulting emulated address and the emulated CS:IP, then halt
 * the emulator.  NOTE(review): callers dereference their (NULL) pointer
 * right after calling this, so correctness depends on x86emu_halt_sys()
 * not returning here (non-local exit) -- confirm against contrib/x86emu.
 */
static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

	x86bios_fault = 1;
	x86bios_fault_addr = addr;
	x86bios_fault_cs = emu->x86.R_CS;
	x86bios_fault_ip = emu->x86.R_IP;
	x86emu_halt_sys(emu);
}

/*
 * Translate an emulated physical address to a kernel VA, or NULL if the
 * address is unmapped or the [offset, offset + size) access would cross
 * outside the emulated address space.
 */
static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
	vm_offset_t page;

	if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
		return (NULL);

	/*
	 * Accesses just above 1M wrap back onto the IVT/BDA, mimicking
	 * real-mode address wrap-around with A20 disabled.
	 */
	if (offset >= X86BIOS_MEM_SIZE)
		offset -= X86BIOS_MEM_SIZE;
	page = x86bios_map[offset / X86BIOS_PAGE_SIZE];
	if (page != 0)
		return ((void *)(page + offset % X86BIOS_PAGE_SIZE));

	return (NULL);
}

/*
 * Install translations for the emulated physical range [pa, pa + size)
 * pointing at the kernel VA range starting at va.  pa and va are assumed
 * page-aligned (all callers pass page-aligned regions).
 */
static void
x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
{
	int i, j;

	for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
	    j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
		x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
}

/*
 * x86emu memory-read callbacks: byte, word, and long.  On an unmapped
 * address these report a fault via x86bios_set_fault() (which is expected
 * not to return; see the NOTE there).  Words/longs use the unaligned
 * le*dec() decoders on strict-alignment architectures.
 */
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	return (*va);
}

static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		return (le16dec(va));
	else
#endif
	return (le16toh(*va));
}

static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		return (le32dec(va));
	else
#endif
	return (le32toh(*va));
}

/*
 * x86emu memory-write callbacks, mirroring the read callbacks above:
 * fault on unmapped addresses, handle misalignment on strict-alignment
 * architectures, and store in little-endian (guest) byte order.
 */
static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	*va = val;
}

static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		le16enc(va, val);
	else
#endif
	*va = htole16(val);
}

static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
	*va = htole32(val);
}

/*
 * x86emu port-input callbacks.  The APM scratch register (0xb2) and the
 * POST status registers (0x80-0x87) are stubbed out rather than touching
 * real hardware.  On non-native architectures, misaligned word/long I/O
 * is split into smaller naturally-aligned accesses.
 */
static uint8_t
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
{

	if (port == 0xb2)		/* APM scratch register */
		return (0);
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	return (iodev_read_1(port));
}

static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
	uint16_t val;

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_1(port + 1) << 8;
	} else
#endif
	val = iodev_read_2(port);

	return (val);
}

static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
	uint32_t val;

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_2(port + 1) << 8;
		val |= iodev_read_1(port + 3) << 24;
	} else if ((port & 2) != 0) {
		val = iodev_read_2(port);
		val |= iodev_read_2(port + 2) << 16;
	} else
#endif
	val = iodev_read_4(port);

	return (val);
}

/*
 * x86emu port-output callbacks, mirroring the input callbacks: writes to
 * the APM scratch and POST status ports are dropped, and misaligned
 * accesses are split on non-native architectures.
 */
static void
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
{

	if (port == 0xb2)		/* APM scratch register */
		return;
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	iodev_write_1(port, val);
}

static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_1(port + 1, val >> 8);
	} else
#endif
	iodev_write_2(port, val);
}

static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

#ifndef X86BIOS_NATIVE_ARCH
	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_2(port + 1, val >> 8);
		iodev_write_1(port + 3, val >> 24);
	} else if ((port & 2) != 0) {
		iodev_write_2(port, val);
		iodev_write_2(port + 2, val >> 16);
	} else
#endif
	iodev_write_4(port, val);
}

/*
 * x86emu software-interrupt trampoline: emulate the CPU's INT entry
 * sequence by pushing FLAGS, CS and IP onto the emulated stack (which
 * lives in x86bios_seg), loading CS:IP from the vector table, and
 * clearing IF/TF, just as a real INT does.
 */
static void
x86bios_emu_get_intr(struct x86emu *emu, int intno)
{
	uint16_t *sp;
	uint32_t iv;

	emu->x86.R_SP -= 6;

	sp = (uint16_t *)((vm_offset_t)x86bios_seg + emu->x86.R_SP);
	sp[0] = htole16(emu->x86.R_IP);
	sp[1] = htole16(emu->x86.R_CS);
	sp[2] = htole16(emu->x86.R_FLG);

	iv = x86bios_get_intr(intno);
	emu->x86.R_IP = iv & 0xffff;
	emu->x86.R_CS = (iv >> 16) & 0xffff;
	emu->x86.R_FLG &= ~(F_IF | F_TF);
}

/*
 * Allocate guest-visible memory below the ROM region and map it into the
 * emulated address space.  On success *offset receives the emulated
 * physical address of the allocation.  Free with x86bios_free().
 */
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;

	if (offset == NULL || size == 0)
		return (NULL);

	/* Page-aligned so x86bios_set_pages()/x86bios_free() line up. */
	vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
	    x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
	}

	return (vaddr);
}

/*
 * Release memory obtained from x86bios_alloc(): silently ignore pointers
 * that do not look like one of our page-aligned RAM-range allocations,
 * then clear the page translations and free the backing memory.
 */
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;

	if (addr == NULL || size == 0)
		return;

	paddr = vtophys(addr);
	if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
	    paddr % X86BIOS_PAGE_SIZE != 0)
		return;

	bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
	    sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
	contigfree(addr, size, M_DEVBUF);
}

/*
 * Initialize a register set for a BIOS call: zero everything, point DS at
 * the BDA (segment 0x40) and SS at the scratch segment.  The DS/SS pad
 * aliases are needed because struct x86regs hides those fields.
 */
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
	regs->X86BIOS_R_DS = 0x40;
	regs->X86BIOS_R_SS = x86bios_seg_phys >> 4;
}

/*
 * Execute a far call at seg:off in the emulator with the given register
 * set; the resulting registers are copied back out.  The spin mutex
 * serializes all use of the single shared emulator instance.
 */
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

	if (x86bios_map == NULL)
		return;

	if (x86bios_trace_call)
		printf("Calling 0x%05x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);

	mtx_lock_spin(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	x86emu_exec_call(&x86bios_emu, seg, off);
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock_spin(&x86bios_lock);

	if (x86bios_trace_call) {
		printf("Exiting 0x%05x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    (seg << 4) + off, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);
		if (x86bios_fault)
			printf("Page fault at 0x%05x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

/*
 * Fetch interrupt vector intno (seg:off packed little-endian) straight
 * from the mapped IVT.  No bounds check here; callers pass 0-255.
 */
uint32_t
x86bios_get_intr(int intno)
{
	uint32_t *iv;

	iv = (uint32_t *)((vm_offset_t)x86bios_ivt + intno * 4);

	return (le32toh(*iv));
}

/*
 * Execute software interrupt intno in the emulator with the given
 * register set; structured exactly like x86bios_call() above.
 */
void
x86bios_intr(struct x86regs *regs, int intno)
{

	if (intno < 0 || intno > 255)
		return;

	if (x86bios_map == NULL)
		return;

	if (x86bios_trace_int)
		printf("Calling int 0x%x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    intno, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);

	mtx_lock_spin(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	x86emu_exec_intr(&x86bios_emu, intno);
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock_spin(&x86bios_lock);

	if (x86bios_trace_int) {
		printf("Exiting int 0x%x (ax=0x%04x bx=0x%04x "
		    "cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",
		    intno, regs->R_AX, regs->R_BX, regs->R_CX,
		    regs->R_DX, regs->R_ES, regs->R_DI);
		if (x86bios_fault)
			printf("Page fault at 0x%05x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

/*
 * Translate a single emulated physical address to a kernel VA, or NULL
 * if it is not mapped.
 */
void *
x86bios_offset(uint32_t offset)
{

	return (x86bios_get_pages(offset, 1));
}

/*
 * Return a pointer to the option ROM at the given emulated address, or
 * NULL if no x86 option ROM is there (checks the 0x55 0xaa signature and
 * a JMP opcode at the POST entry point).
 */
void *
x86bios_get_orm(uint32_t offset)
{
	uint8_t *p;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_offset(offset);
	if (p == NULL || p[0] != 0x55 || p[1] != 0xaa || p[3] != 0xe9)
		return (NULL);

	return (p);
}

/*
 * Check whether the option ROM at offset belongs to the given PCI device
 * by validating its PCI data structure ("PCIR") and comparing vendor,
 * device and class code.  Field offsets follow the PCI firmware spec.
 * Returns 1 on match, 0 otherwise.
 */
int
x86bios_match_device(uint32_t offset, device_t dev)
{
	uint8_t *p;
	uint16_t device, vendor;
	uint8_t class, progif, subclass;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_get_orm(offset);
	if (p == NULL)
		return (0);

	/* Does it contain PCI data structure? */
	p += le16toh(*(uint16_t *)(p + 0x18));
	if (bcmp(p, "PCIR", 4) != 0 ||
	    le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
		return (0);

	/* Does it match the vendor, device, and classcode? */
	vendor = le16toh(*(uint16_t *)(p + 0x04));
	device = le16toh(*(uint16_t *)(p + 0x06));
	progif = *(p + 0x0d);
	subclass = *(p + 0x0e);
	class = *(p + 0x0f);
	if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
	    class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
	    progif != pci_get_progif(dev))
		return (0);

	return (1);
}

/*
 * Tear down whatever x86bios_map_mem() set up; each region is released
 * with the matching primitive for how it was obtained (pmap_mapbios/
 * malloc for the IVT, pmap_mapdev for the ROM, contigmalloc for the
 * scratch segment).
 */
static __inline void
x86bios_unmap_mem(void)
{

	if (x86bios_ivt != NULL)
#ifdef X86BIOS_NATIVE_ARCH
		pmap_unmapdev((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
#else
		free(x86bios_ivt, M_DEVBUF);
#endif
	if (x86bios_rom != NULL)
		pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
	if (x86bios_seg != NULL)
		contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
}

/*
 * Map (or fake) the three regions of the emulated address space: the
 * IVT/BDA, the ROM area (extended downward to cover a probed EBDA on
 * native x86), and a contiguous 64K scratch segment.  Returns 0 on
 * success, 1 on failure (with everything unwound).
 *
 * NOTE(review): in the non-PC98 EBDA probe below, the rounddown() result
 * assigned to x86bios_rom_phys is immediately overwritten by the
 * unconditional X86BIOS_ROM_BASE assignment unless the dangling "else"
 * binds to it -- it does: the else skips that assignment when an EBDA
 * was accepted.  Easy to misread; preserve the #ifdef nesting exactly.
 */
static __inline int
x86bios_map_mem(void)
{

#ifdef X86BIOS_NATIVE_ARCH
	x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

#ifndef PC98
	/* Probe EBDA via BDA. */
	x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
	x86bios_rom_phys = x86bios_rom_phys << 4;
	if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
		x86bios_rom_phys =
		    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
	else
#endif
#else
	x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
#endif

	x86bios_rom_phys = X86BIOS_ROM_BASE;
	x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
	if (x86bios_rom == NULL)
		goto fail;
#if defined(X86BIOS_NATIVE_ARCH) && !defined(PC98)
	/* Change attribute for EBDA. */
	if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    pmap_change_attr((vm_offset_t)x86bios_rom,
	    X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
		goto fail;
#endif

	x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
	    X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	x86bios_seg_phys = vtophys(x86bios_seg);

	if (bootverbose) {
		printf("x86bios: IVT 0x%06x-0x%06x at %p\n",
		    X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
		    x86bios_ivt);
		printf("x86bios: SSEG 0x%06x-0x%06x at %p\n",
		    (uint32_t)x86bios_seg_phys,
		    X86BIOS_SEG_SIZE + (uint32_t)x86bios_seg_phys - 1,
		    x86bios_seg);
		if (x86bios_rom_phys < X86BIOS_ROM_BASE)
			printf("x86bios: EBDA 0x%06x-0x%06x at %p\n",
			    (uint32_t)x86bios_rom_phys, X86BIOS_ROM_BASE - 1,
			    x86bios_rom);
		printf("x86bios: ROM 0x%06x-0x%06x at %p\n",
		    X86BIOS_ROM_BASE, X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
		    (void *)((vm_offset_t)x86bios_rom + X86BIOS_ROM_BASE -
		    (vm_offset_t)x86bios_rom_phys));
	}

	return (0);

fail:
	x86bios_unmap_mem();

	return (1);
}

/*
 * Module load handler: map the address space, build the page translation
 * table, and wire the memory/port/interrupt callbacks into the shared
 * emulator instance.  Returns 0 or ENOMEM.
 */
static int
x86bios_init(void)
{
	int i;

	if (x86bios_map_mem() != 0)
		return (ENOMEM);

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_SPIN);

	x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
	    X86BIOS_IVT_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
	    X86BIOS_ROM_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
	    X86BIOS_SEG_SIZE);

	bzero(&x86bios_emu, sizeof(x86bios_emu));

	x86bios_emu.emu_rdb = x86bios_emu_rdb;
	x86bios_emu.emu_rdw = x86bios_emu_rdw;
	x86bios_emu.emu_rdl = x86bios_emu_rdl;
	x86bios_emu.emu_wrb = x86bios_emu_wrb;
	x86bios_emu.emu_wrw = x86bios_emu_wrw;
	x86bios_emu.emu_wrl = x86bios_emu_wrl;

	x86bios_emu.emu_inb = x86bios_emu_inb;
	x86bios_emu.emu_inw = x86bios_emu_inw;
	x86bios_emu.emu_inl = x86bios_emu_inl;
	x86bios_emu.emu_outb = x86bios_emu_outb;
	x86bios_emu.emu_outw = x86bios_emu_outw;
	x86bios_emu.emu_outl = x86bios_emu_outl;

	/* All 256 software interrupts go through the INT trampoline. */
	for (i = 0; i < 256; i++)
		x86bios_emu._x86emu_intrTab[i] = x86bios_emu_get_intr;

	return (0);
}

/*
 * Module unload handler: clear x86bios_map under the lock so concurrent
 * x86bios_call()/x86bios_intr() callers see the shutdown, then unmap.
 *
 * NOTE(review): free() is called while holding a spin mutex here; that
 * is normally forbidden (free() may sleep/acquire sleepable locks) --
 * confirm against mutex(9)/malloc(9) whether this is safe at unload.
 */
static int
x86bios_uninit(void)
{
	vm_offset_t *map = x86bios_map;

	mtx_lock_spin(&x86bios_lock);
	if (x86bios_map != NULL) {
		free(x86bios_map, M_DEVBUF);
		x86bios_map = NULL;
	}
	mtx_unlock_spin(&x86bios_lock);

	if (map != NULL)
		x86bios_unmap_mem();

	mtx_destroy(&x86bios_lock);

	return (0);
}

/*
 * Standard module event dispatcher: only load and unload are supported.
 */
static int
x86bios_modevent(module_t mod __unused, int type, void *data __unused)
{

	switch (type) {
	case MOD_LOAD:
		return (x86bios_init());
	case MOD_UNLOAD:
		return (x86bios_uninit());
	default:
		return (ENOTSUP);
	}
}

static moduledata_t x86bios_mod = {
	"x86bios",
	x86bios_modevent,
	NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);