1 /*- 2 * Copyright (c) 2009 Alex Keda <admin@lissyara.su> 3 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_x86bios.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <contrib/x86emu/x86emu.h>
#include <contrib/x86emu/x86emu_regs.h>
#include <compat/x86bios/x86bios.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <vm/vm.h>
#include <vm/pmap.h>

/*
 * Two back ends are provided: on i386 the real-mode BIOS is entered
 * directly via vm86 mode; on amd64 (and anywhere else this file is
 * built) the BIOS code is interpreted with the x86emu software emulator.
 */
#ifdef __amd64__
#define	X86BIOS_NATIVE_ARCH
#endif
#ifdef __i386__
#define	X86BIOS_NATIVE_VM86
#endif

#define	X86BIOS_MEM_SIZE	0x00100000	/* 1M */

/*
 * Debug trace helper: dump the registers most commonly used for BIOS
 * parameter passing before/after a call or software interrupt.
 */
#define	X86BIOS_TRACE(h, n, r)	do {					\
	printf(__STRING(h)						\
	    " (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
	    (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX,		\
	    (r)->R_ES, (r)->R_DI);					\
} while (0)

/* Serializes all BIOS entry and low-memory page-map updates. */
static struct mtx x86bios_lock;

SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL, "x86bios debugging");
static int x86bios_trace_call;
TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
    "Trace far function calls");
static int x86bios_trace_int;
TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
    "Trace software interrupt handlers");

#ifdef X86BIOS_NATIVE_VM86

#include <machine/vm86.h>
#include <machine/vmparam.h>
#include <machine/pc/bios.h>

/* vm86 page table/context shared by all x86bios consumers. */
struct vm86context x86bios_vmc;

/*
 * Copy caller-visible x86emu-style registers into a vm86 trap frame
 * prior to entering vm86 mode.
 */
static void
x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
{

	vmf->vmf_ds = regs->R_DS;
	vmf->vmf_es = regs->R_ES;
	vmf->vmf_ax = regs->R_AX;
	vmf->vmf_bx = regs->R_BX;
	vmf->vmf_cx = regs->R_CX;
	vmf->vmf_dx = regs->R_DX;
	vmf->vmf_bp = regs->R_BP;
	vmf->vmf_si = regs->R_SI;
	vmf->vmf_di = regs->R_DI;
}

/*
 * Copy a vm86 trap frame back into x86emu-style registers after the
 * BIOS returns.  Note the flags register is also propagated here (many
 * BIOS services report errors via CF).
 */
static void
x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
{

	regs->R_DS = vmf->vmf_ds;
	regs->R_ES = vmf->vmf_es;
	regs->R_FLG = vmf->vmf_flags;
	regs->R_AX = vmf->vmf_ax;
	regs->R_BX = vmf->vmf_bx;
	regs->R_CX = vmf->vmf_cx;
	regs->R_DX = vmf->vmf_dx;
	regs->R_BP = vmf->vmf_bp;
	regs->R_SI = vmf->vmf_si;
	regs->R_DI = vmf->vmf_di;
}

/*
 * Allocate physically contiguous memory below 1M that BIOS code can
 * address, map it into the vm86 context page by page, and return its
 * kernel virtual address.  *offset receives the physical address.
 * Returns NULL on bad arguments or allocation failure.
 */
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;
	int i;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
	    PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		for (i = 0; i < atop(round_page(size)); i++)
			vm86_addpage(&x86bios_vmc, atop(*offset) + i,
			    (vm_offset_t)vaddr + ptoa(i));
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}

/*
 * Release memory obtained from x86bios_alloc(): clear the matching
 * vm86 page map entries (trimming npages when the freed run was at the
 * tail) and return the pages to contigmalloc.  Silently ignores
 * pointers that do not look like ours (above 1M or not page-aligned).
 */
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;
	int i, nfree;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
		return;
	mtx_lock(&x86bios_lock);
	for (i = 0; i < x86bios_vmc.npages; i++)
		if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
			break;
	if (i >= x86bios_vmc.npages) {
		mtx_unlock(&x86bios_lock);
		return;
	}
	nfree = atop(round_page(size));
	bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
	if (i + nfree == x86bios_vmc.npages) {
		x86bios_vmc.npages -= nfree;
		/* Also reclaim any already-cleared entries before us. */
		while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
			x86bios_vmc.npages--;
	}
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}

/* Reset a register set to all zeroes before loading call parameters. */
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
}

/*
 * Call a far real-mode function at seg:off with the given registers,
 * returning the resulting register state in *regs.
 */
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{
	struct vm86frame vmf;

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	vmf.vmf_cs = seg;
	vmf.vmf_ip = off;
	mtx_lock(&x86bios_lock);
	vm86_datacall(-1, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_call)
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
}

/*
 * Fetch the real-mode interrupt vector (seg:off packed in 32 bits) for
 * intno straight from the interrupt vector table at physical 0.
 */
uint32_t
x86bios_get_intr(int intno)
{

	return (readl(BIOS_PADDRTOVADDR(intno * 4)));
}

/*
 * Issue software interrupt intno with the given registers, returning
 * the resulting register state in *regs.
 */
void
x86bios_intr(struct x86regs *regs, int intno)
{
	struct vm86frame vmf;

	if (x86bios_trace_int)
		X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	mtx_lock(&x86bios_lock);
	vm86_datacall(intno, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_int)
		X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
}

/*
 * Translate a physical offset below 1M to a kernel virtual address:
 * prefer a page registered in the vm86 context (x86bios_alloc memory),
 * falling back to the direct BIOS mapping.
 */
void *
x86bios_offset(uint32_t offset)
{
	vm_offset_t addr;

	addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
	    X86BIOS_PHYSTOOFF(offset));
	if (addr == 0)
		addr = BIOS_PADDRTOVADDR(offset);

	return ((void *)addr);
}

/* Module load: set up the lock and an empty vm86 context. */
static int
x86bios_init(void)
{

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
	bzero(&x86bios_vmc, sizeof(x86bios_vmc));

	return (0);
}

/* Module unload: tear down the lock. */
static int
x86bios_uninit(void)
{

	mtx_destroy(&x86bios_lock);

	return (0);
}

#else

#include <machine/iodev.h>

#define	X86BIOS_PAGE_SIZE	0x00001000	/* 4K */

#define	X86BIOS_IVT_SIZE	0x00000500	/* 1K + 256 (BDA) */

/* Layout of the emulated low-memory address space. */
#define	X86BIOS_IVT_BASE	0x00000000
#define	X86BIOS_RAM_BASE	0x00001000
#define	X86BIOS_ROM_BASE	0x000a0000
/* ROM region runs from the (possibly EBDA-lowered) base up to 1M. */
#define	X86BIOS_ROM_SIZE	(X86BIOS_MEM_SIZE - x86bios_rom_phys)
#define	X86BIOS_SEG_SIZE	X86BIOS_PAGE_SIZE

#define	X86BIOS_PAGES		(X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)

/* Stack segment/pointer live in x86emu padding fields. */
#define	X86BIOS_R_SS		_pad2
#define	X86BIOS_R_SP		_pad3.I16_reg.x_reg

/* The single emulator instance; access serialized by x86bios_lock. */
static struct x86emu x86bios_emu;

static void *x86bios_ivt;	/* interrupt vector table + BDA */
static void *x86bios_rom;	/* mapped option/system ROM (and EBDA) */
static void *x86bios_seg;	/* stack segment for emulated code */

/* Per-4K-page map from emulated physical page to kernel VA (0 = hole). */
static vm_offset_t *x86bios_map;

static vm_paddr_t x86bios_rom_phys;
static vm_paddr_t x86bios_seg_phys;

/* Last emulated "page fault" details, reported after the call. */
static int x86bios_fault;
static uint32_t x86bios_fault_addr;
static uint16_t x86bios_fault_cs;
static uint16_t x86bios_fault_ip;

/*
 * Record an access to an unmapped emulated address and halt the
 * emulator.  NOTE(review): callers below dereference their pointer
 * right after calling this, so correctness relies on
 * x86emu_halt_sys() not returning (longjmp-style unwind) — confirm
 * against contrib/x86emu.
 */
static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

	x86bios_fault = 1;
	x86bios_fault_addr = addr;
	x86bios_fault_cs = emu->x86.R_CS;
	x86bios_fault_ip = emu->x86.R_IP;
	x86emu_halt_sys(emu);
}

/*
 * Translate an emulated physical address to a kernel VA, or NULL when
 * the range is out of bounds or falls on an unmapped page.  Accesses
 * just past 1M wrap to the IVT (real-mode segment wrap-around).
 */
static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
	vm_offset_t addr;

	if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
		return (NULL);

	if (offset >= X86BIOS_MEM_SIZE)
		offset -= X86BIOS_MEM_SIZE;
	addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
	if (addr != 0)
		addr += offset % X86BIOS_PAGE_SIZE;

	return ((void *)addr);
}

/* Register kernel VA va for emulated physical range [pa, pa + size). */
static void
x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
{
	int i, j;

	for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
	    j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
		x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
}

/* x86emu memory callback: read a byte from emulated memory. */
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	return (*va);
}

/*
 * x86emu memory callback: read a little-endian word.  Unaligned reads
 * go through le16dec() on strict-alignment machines.
 */
static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		return (le16dec(va));
	else
#endif
	return (le16toh(*va));
}

/* x86emu memory callback: read a little-endian 32-bit long. */
static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		return (le32dec(va));
	else
#endif
	return (le32toh(*va));
}

/* x86emu memory callback: write a byte to emulated memory. */
static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	*va = val;
}

/* x86emu memory callback: write a little-endian word. */
static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		le16enc(va, val);
	else
#endif
	*va = htole16(val);
}

/* x86emu memory callback: write a little-endian 32-bit long. */
static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
	*va = htole32(val);
}

/*
 * x86emu I/O callback: byte input.  On non-x86 hosts a few PC-isms
 * (APM scratch, POST status) are faked rather than touched.
 */
static uint8_t
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port == 0xb2)		/* APM scratch register */
		return (0);
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);
#endif

	return (iodev_read_1(port));
}

/*
 * x86emu I/O callback: word input.  Non-x86 hosts split an unaligned
 * word access into byte accesses.
 */
static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
	uint16_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_1(port + 1) << 8;
	} else
#endif
	val = iodev_read_2(port);

	return (val);
}

/*
 * x86emu I/O callback: 32-bit input, likewise splitting misaligned
 * accesses on non-x86 hosts.
 */
static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
	uint32_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_2(port + 1) << 8;
		val |= iodev_read_1(port + 3) << 24;
	} else if ((port & 2) != 0) {
		val = iodev_read_2(port);
		val |= iodev_read_2(port + 2) << 16;
	} else
#endif
	val = iodev_read_4(port);

	return (val);
}

/* x86emu I/O callback: byte output (PC-only ports dropped off-x86). */
static void
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port == 0xb2)		/* APM scratch register */
		return;
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;
#endif

	iodev_write_1(port, val);
}

/* x86emu I/O callback: word output, splitting misaligned accesses. */
static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_1(port + 1, val >> 8);
	} else
#endif
	iodev_write_2(port, val);
}

/* x86emu I/O callback: 32-bit output, splitting misaligned accesses. */
static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_2(port + 1, val >> 8);
		iodev_write_1(port + 3, val >> 24);
	} else if ((port & 2) != 0) {
		iodev_write_2(port, val);
		iodev_write_2(port + 2, val >> 16);
	} else
#endif
	iodev_write_4(port, val);
}

/*
 * Emulated INT dispatch: push IP, CS and FLAGS on the emulated stack
 * (as real hardware does), then vector through the IVT with IF/TF
 * cleared.
 */
static void
x86bios_emu_get_intr(struct x86emu *emu, int intno)
{
	uint16_t *sp;
	uint32_t iv;

	emu->x86.R_SP -= 6;

	sp = (uint16_t *)((vm_offset_t)x86bios_seg + emu->x86.R_SP);
	sp[0] = htole16(emu->x86.R_IP);
	sp[1] = htole16(emu->x86.R_CS);
	sp[2] = htole16(emu->x86.R_FLG);

	iv = x86bios_get_intr(intno);
	emu->x86.R_IP = iv & 0xffff;
	emu->x86.R_CS = (iv >> 16) & 0xffff;
	emu->x86.R_FLG &= ~(F_IF | F_TF);
}

/*
 * Allocate contiguous memory inside the emulated RAM window
 * [X86BIOS_RAM_BASE, x86bios_rom_phys) and register it in the page
 * map.  *offset receives the physical address; returns NULL on bad
 * arguments or allocation failure.
 */
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
	    x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}

/*
 * Release memory obtained from x86bios_alloc(): clear its page-map
 * entries and return it to contigmalloc.  Pointers outside the
 * emulated RAM window or not page-aligned are silently ignored.
 */
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
	    paddr % X86BIOS_PAGE_SIZE != 0)
		return;
	mtx_lock(&x86bios_lock);
	bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
	    sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}

/*
 * Reset a register set to zeroes and point SS:SP at the top of the
 * private stack segment (minus one slot).
 */
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
	regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
	regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
}

/*
 * Call a far real-mode function at seg:off under the emulator,
 * returning the resulting register state in *regs.  spinlock_enter()
 * keeps interrupts off while emulated code touches I/O ports.
 */
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	mtx_lock(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	spinlock_enter();
	x86emu_exec_call(&x86bios_emu, seg, off);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_call) {
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

/*
 * Fetch the real-mode interrupt vector (seg:off packed in 32 bits)
 * for intno from the copied/mapped IVT.
 */
uint32_t
x86bios_get_intr(int intno)
{
	uint32_t *iv;

	iv = (uint32_t *)((vm_offset_t)x86bios_ivt + intno * 4);

	return (le32toh(*iv));
}

/*
 * Issue software interrupt intno under the emulator, returning the
 * resulting register state in *regs.  Out-of-range vectors are
 * silently ignored.
 */
void
x86bios_intr(struct x86regs *regs, int intno)
{

	if (intno < 0 || intno > 255)
		return;

	if (x86bios_trace_int)
		X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

	mtx_lock(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	spinlock_enter();
	x86emu_exec_intr(&x86bios_emu, intno);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_int) {
		X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

/*
 * Translate an emulated physical offset to a kernel virtual address
 * (NULL if unmapped).
 */
void *
x86bios_offset(uint32_t offset)
{

	return (x86bios_get_pages(offset, 1));
}

/* Undo x86bios_map_mem(); safe to call on a partially set-up state. */
static __inline void
x86bios_unmap_mem(void)
{

	free(x86bios_map, M_DEVBUF);
	if (x86bios_ivt != NULL)
#ifdef X86BIOS_NATIVE_ARCH
		pmap_unmapdev((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
#else
		free(x86bios_ivt, M_DEVBUF);
#endif
	if (x86bios_rom != NULL)
		pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
	if (x86bios_seg != NULL)
		contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
}

/*
 * Build the emulated low-memory address space: map (or fabricate) the
 * IVT, map the ROM region (extended downward to cover the EBDA when
 * one is found via the BDA), allocate a stack segment, and register
 * everything in the page map.  Returns 0 on success, 1 on failure.
 */
static __inline int
x86bios_map_mem(void)
{

	x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
	    M_WAITOK | M_ZERO);

#ifdef X86BIOS_NATIVE_ARCH
	x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

	/* Probe EBDA via BDA. */
	x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
	x86bios_rom_phys = x86bios_rom_phys << 4;
	if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
		x86bios_rom_phys =
		    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
	else
#else
	x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
#endif

	/* NB: on native arch this is the dangling-else fallback above. */
	x86bios_rom_phys = X86BIOS_ROM_BASE;
	x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
	if (x86bios_rom == NULL)
		goto fail;
#ifdef X86BIOS_NATIVE_ARCH
	/* Change attribute for EBDA. */
	if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    pmap_change_attr((vm_offset_t)x86bios_rom,
	    X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
		goto fail;
#endif

	x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
	    X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	x86bios_seg_phys = vtophys(x86bios_seg);

	x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
	    X86BIOS_IVT_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
	    X86BIOS_ROM_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
	    X86BIOS_SEG_SIZE);

	if (bootverbose) {
		printf("x86bios: IVT 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_IVT_BASE,
		    (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
		    x86bios_ivt);
		printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
		    x86bios_seg_phys,
		    (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
		    x86bios_seg);
		if (x86bios_rom_phys < X86BIOS_ROM_BASE)
			printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
			    x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
			    x86bios_rom);
		printf("x86bios: ROM 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_ROM_BASE,
		    (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
		    (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
	}

	return (0);

fail:
	x86bios_unmap_mem();

	return (1);
}

/*
 * Module load: set up memory and wire the emulator's memory, I/O and
 * interrupt callbacks.
 */
static int
x86bios_init(void)
{
	int i;

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);

	if (x86bios_map_mem() != 0)
		return (ENOMEM);

	bzero(&x86bios_emu, sizeof(x86bios_emu));

	x86bios_emu.emu_rdb = x86bios_emu_rdb;
	x86bios_emu.emu_rdw = x86bios_emu_rdw;
	x86bios_emu.emu_rdl = x86bios_emu_rdl;
	x86bios_emu.emu_wrb = x86bios_emu_wrb;
	x86bios_emu.emu_wrw = x86bios_emu_wrw;
	x86bios_emu.emu_wrl = x86bios_emu_wrl;

	x86bios_emu.emu_inb = x86bios_emu_inb;
	x86bios_emu.emu_inw = x86bios_emu_inw;
	x86bios_emu.emu_inl = x86bios_emu_inl;
	x86bios_emu.emu_outb = x86bios_emu_outb;
	x86bios_emu.emu_outw = x86bios_emu_outw;
	x86bios_emu.emu_outl = x86bios_emu_outl;

	for (i = 0; i < 256; i++)
		x86bios_emu._x86emu_intrTab[i] = x86bios_emu_get_intr;

	return (0);
}

/* Module unload: release mappings and the lock. */
static int
x86bios_uninit(void)
{

	x86bios_unmap_mem();
	mtx_destroy(&x86bios_lock);

	return (0);
}

#endif

/*
 * Return a pointer to the option ROM at offset if it carries the
 * 0x55 0xaa signature followed by a jmp (0xe9) POST entry, else NULL.
 */
void *
x86bios_get_orm(uint32_t offset)
{
	uint8_t *p;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_offset(offset);
	if (p == NULL || p[0] != 0x55 || p[1] != 0xaa || p[3] != 0xe9)
		return (NULL);

	return (p);
}

/*
 * Check whether the option ROM at offset belongs to PCI device dev by
 * validating its PCIR data structure and comparing vendor, device and
 * class code.  Returns 1 on a match, 0 otherwise.
 */
int
x86bios_match_device(uint32_t offset, device_t dev)
{
	uint8_t *p;
	uint16_t device, vendor;
	uint8_t class, progif, subclass;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_get_orm(offset);
	if (p == NULL)
		return (0);

	/* Does it contain PCI data structure? */
	p += le16toh(*(uint16_t *)(p + 0x18));
	if (bcmp(p, "PCIR", 4) != 0 ||
	    le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
		return (0);

	/* Does it match the vendor, device, and classcode? */
	vendor = le16toh(*(uint16_t *)(p + 0x04));
	device = le16toh(*(uint16_t *)(p + 0x06));
	progif = *(p + 0x0d);
	subclass = *(p + 0x0e);
	class = *(p + 0x0f);
	if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
	    class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
	    progif != pci_get_progif(dev))
		return (0);

	return (1);
}

/* Standard module event handler: init on load, teardown on unload. */
static int
x86bios_modevent(module_t mod __unused, int type, void *data __unused)
{

	switch (type) {
	case MOD_LOAD:
		return (x86bios_init());
	case MOD_UNLOAD:
		return (x86bios_uninit());
	default:
		return (ENOTSUP);
	}
}

static moduledata_t x86bios_mod = {
	"x86bios",
	x86bios_modevent,
	NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);