xref: /freebsd/sys/compat/x86bios/x86bios.c (revision a3cf0ef5a295c885c895fabfd56470c0d1db322d)
1 /*-
2  * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
3  * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_x86bios.h"
32 
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/kernel.h>
36 #include <sys/lock.h>
37 #include <sys/malloc.h>
38 #include <sys/module.h>
39 #include <sys/mutex.h>
40 #include <sys/sysctl.h>
41 
42 #include <contrib/x86emu/x86emu.h>
43 #include <contrib/x86emu/x86emu_regs.h>
44 #include <compat/x86bios/x86bios.h>
45 
46 #include <dev/pci/pcireg.h>
47 #include <dev/pci/pcivar.h>
48 
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51 
52 #ifdef __amd64__
53 #define	X86BIOS_NATIVE_ARCH
54 #endif
55 #ifdef __i386__
56 #define	X86BIOS_NATIVE_VM86
57 #endif
58 
59 #define	X86BIOS_MEM_SIZE	0x00100000	/* 1M */
60 
61 #define	X86BIOS_TRACE(h, n, r)	do {					\
62 	printf(__STRING(h)						\
63 	    " (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
64 	    (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX,		\
65 	    (r)->R_ES, (r)->R_DI);					\
66 } while (0)
67 
68 static struct mtx x86bios_lock;
69 
70 SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL, "x86bios debugging");
71 static int x86bios_trace_call;
72 TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
73 SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
74     "Trace far function calls");
75 static int x86bios_trace_int;
76 TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
77 SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
78     "Trace software interrupt handlers");
79 
80 #ifdef X86BIOS_NATIVE_VM86
81 
82 #include <machine/vm86.h>
83 #include <machine/vmparam.h>
84 #include <machine/pc/bios.h>
85 
86 struct vm86context x86bios_vmc;
87 
88 static void
89 x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
90 {
91 
92 	vmf->vmf_ds = regs->R_DS;
93 	vmf->vmf_es = regs->R_ES;
94 	vmf->vmf_ax = regs->R_AX;
95 	vmf->vmf_bx = regs->R_BX;
96 	vmf->vmf_cx = regs->R_CX;
97 	vmf->vmf_dx = regs->R_DX;
98 	vmf->vmf_bp = regs->R_BP;
99 	vmf->vmf_si = regs->R_SI;
100 	vmf->vmf_di = regs->R_DI;
101 }
102 
103 static void
104 x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
105 {
106 
107 	regs->R_DS = vmf->vmf_ds;
108 	regs->R_ES = vmf->vmf_es;
109 	regs->R_FLG = vmf->vmf_flags;
110 	regs->R_AX = vmf->vmf_ax;
111 	regs->R_BX = vmf->vmf_bx;
112 	regs->R_CX = vmf->vmf_cx;
113 	regs->R_DX = vmf->vmf_dx;
114 	regs->R_BP = vmf->vmf_bp;
115 	regs->R_SI = vmf->vmf_si;
116 	regs->R_DI = vmf->vmf_di;
117 }
118 
/*
 * Allocate wired, physically contiguous memory below 1M and register
 * each of its pages with the vm86 context so real-mode code can address
 * it.  The physical address is returned via *offset; the kernel virtual
 * address is the return value, or NULL on bad arguments or failure.
 */
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;
	int i;

	if (offset == NULL || size == 0)
		return (NULL);
	/* Page-aligned, physically contiguous, below X86BIOS_MEM_SIZE. */
	vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
	    PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		/* Enter every backing page into the vm86 page map. */
		for (i = 0; i < atop(round_page(size)); i++)
			vm86_addpage(&x86bios_vmc, atop(*offset) + i,
			    (vm_offset_t)vaddr + ptoa(i));
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}
140 
/*
 * Release memory obtained from x86bios_alloc(): remove its pages from
 * the vm86 page map and return the memory to the system.  Silently
 * ignores addresses that were not allocated by x86bios_alloc().
 */
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;
	int i, nfree;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	/* Must be page-aligned low memory, as x86bios_alloc() hands out. */
	if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
		return;
	mtx_lock(&x86bios_lock);
	/* Find the first vm86 page map entry for this allocation. */
	for (i = 0; i < x86bios_vmc.npages; i++)
		if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
			break;
	if (i >= x86bios_vmc.npages) {
		mtx_unlock(&x86bios_lock);
		return;
	}
	nfree = atop(round_page(size));
	bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
	/*
	 * If the freed run was at the tail of the map, shrink npages,
	 * also reclaiming any zeroed entries immediately before it.
	 */
	if (i + nfree == x86bios_vmc.npages) {
		x86bios_vmc.npages -= nfree;
		while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
			x86bios_vmc.npages--;
	}
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}
170 
/*
 * Reset a register set to all zeroes before setting up a BIOS call.
 * (The vm86 backend needs no stack setup, unlike the emulator case.)
 */
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
}
177 
/*
 * Execute a far call to seg:off in vm86 mode.  The caller's register
 * set is marshalled into a vm86 frame, the call runs while holding
 * x86bios_lock, and the resulting registers are copied back.
 */
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{
	struct vm86frame vmf;

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	vmf.vmf_cs = seg;
	vmf.vmf_ip = off;
	mtx_lock(&x86bios_lock);
	/* intnum -1: presumably a plain far call -- see vm86_datacall(). */
	vm86_datacall(-1, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_call)
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
}
198 
/*
 * Fetch the seg:off handler address for interrupt vector intno
 * directly from the real-mode IVT (4 bytes per vector at paddr 0).
 */
uint32_t
x86bios_get_intr(int intno)
{

	return (readl(BIOS_PADDRTOVADDR(intno * 4)));
}
205 
/*
 * Store a new seg:off handler address for interrupt vector intno
 * into the real-mode IVT.
 */
void
x86bios_set_intr(int intno, uint32_t saddr)
{

	writel(BIOS_PADDRTOVADDR(intno * 4), saddr);
}
212 
213 void
214 x86bios_intr(struct x86regs *regs, int intno)
215 {
216 	struct vm86frame vmf;
217 
218 	if (x86bios_trace_int)
219 		X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);
220 
221 	bzero(&vmf, sizeof(vmf));
222 	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
223 	mtx_lock(&x86bios_lock);
224 	vm86_datacall(intno, &vmf, &x86bios_vmc);
225 	mtx_unlock(&x86bios_lock);
226 	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
227 
228 	if (x86bios_trace_int)
229 		X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
230 }
231 
/*
 * Translate a real-mode physical offset to a kernel virtual address.
 * Prefer pages registered in the vm86 context (x86bios_alloc memory);
 * fall back to the direct BIOS mapping for everything else.
 */
void *
x86bios_offset(uint32_t offset)
{
	vm_offset_t addr;

	addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
	    X86BIOS_PHYSTOOFF(offset));
	if (addr == 0)
		addr = BIOS_PADDRTOVADDR(offset);

	return ((void *)addr);
}
244 
245 static int
246 x86bios_init(void)
247 {
248 
249 	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
250 	bzero(&x86bios_vmc, sizeof(x86bios_vmc));
251 
252 	return (0);
253 }
254 
/*
 * Backend teardown for the vm86 case; only the lock needs destroying.
 */
static int
x86bios_uninit(void)
{

	mtx_destroy(&x86bios_lock);

	return (0);
}
263 
264 #else
265 
266 #include <machine/iodev.h>
267 
268 #define	X86BIOS_PAGE_SIZE	0x00001000	/* 4K */
269 
270 #define	X86BIOS_IVT_SIZE	0x00000500	/* 1K + 256 (BDA) */
271 
272 #define	X86BIOS_IVT_BASE	0x00000000
273 #define	X86BIOS_RAM_BASE	0x00001000
274 #define	X86BIOS_ROM_BASE	0x000a0000
275 
276 #define	X86BIOS_ROM_SIZE	(X86BIOS_MEM_SIZE - x86bios_rom_phys)
277 #define	X86BIOS_SEG_SIZE	X86BIOS_PAGE_SIZE
278 
279 #define	X86BIOS_PAGES		(X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)
280 
281 #define	X86BIOS_R_SS		_pad2
282 #define	X86BIOS_R_SP		_pad3.I16_reg.x_reg
283 
284 static struct x86emu x86bios_emu;
285 
286 static void *x86bios_ivt;
287 static void *x86bios_rom;
288 static void *x86bios_seg;
289 
290 static vm_offset_t *x86bios_map;
291 
292 static vm_paddr_t x86bios_rom_phys;
293 static vm_paddr_t x86bios_seg_phys;
294 
295 static int x86bios_fault;
296 static uint32_t x86bios_fault_addr;
297 static uint16_t x86bios_fault_cs;
298 static uint16_t x86bios_fault_ip;
299 
/*
 * Record a faulting emulated memory access (address and CS:IP) for
 * later diagnostics and halt the emulator.
 * NOTE(review): the memory callbacks below dereference their page
 * pointer unconditionally after calling this, which is only safe if
 * x86emu_halt_sys() never returns here -- confirm against
 * contrib/x86emu.
 */
static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

	x86bios_fault = 1;
	x86bios_fault_addr = addr;
	x86bios_fault_cs = emu->x86.R_CS;
	x86bios_fault_ip = emu->x86.R_IP;
	x86emu_halt_sys(emu);
}
310 
/*
 * Translate an emulated physical address to a kernel virtual address
 * via the x86bios_map page table.  Accesses in the window just above
 * 1M (up to IVT_SIZE) are wrapped back to low memory -- presumably to
 * emulate real-mode FFFF:xxxx segment wraparound; returns NULL for
 * unmapped or out-of-range addresses.
 */
static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
	vm_offset_t addr;

	if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
		return (NULL);

	if (offset >= X86BIOS_MEM_SIZE)
		offset -= X86BIOS_MEM_SIZE;
	addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
	if (addr != 0)
		addr += offset % X86BIOS_PAGE_SIZE;

	return ((void *)addr);
}
327 
328 static void
329 x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
330 {
331 	int i, j;
332 
333 	for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
334 	    j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
335 		x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
336 }
337 
/*
 * Emulator callback: read one byte of guest memory.
 * Relies on x86bios_set_fault() not returning for the NULL case
 * (see the NOTE there).
 */
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	return (*va);
}
349 
/*
 * Emulator callback: read a little-endian 16-bit word of guest memory.
 * On strict-alignment architectures, misaligned reads go byte-wise
 * through le16dec(); the else pairs with the return after #endif.
 */
static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		return (le16dec(va));
	else
#endif
	return (le16toh(*va));
}
366 
/*
 * Emulator callback: read a little-endian 32-bit word of guest memory.
 * Misaligned reads are done byte-wise on strict-alignment machines.
 */
static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		return (le32dec(va));
	else
#endif
	return (le32toh(*va));
}
383 
/*
 * Emulator callback: write one byte of guest memory.
 */
static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	*va = val;
}
395 
/*
 * Emulator callback: write a little-endian 16-bit word of guest memory.
 * Misaligned writes are done byte-wise on strict-alignment machines.
 */
static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		le16enc(va, val);
	else
#endif
	*va = htole16(val);
}
412 
/*
 * Emulator callback: write a little-endian 32-bit word of guest memory.
 * Misaligned writes are done byte-wise on strict-alignment machines.
 */
static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
	*va = htole32(val);
}
429 
/*
 * Emulator callback: byte port input.  On non-x86 hosts, accesses to
 * the APM scratch register and POST status ports are faked as zero
 * since the hardware does not exist there.
 */
static uint8_t
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port == 0xb2) /* APM scratch register */
		return (0);
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);
#endif

	return (iodev_read_1(port));
}
443 
/*
 * Emulator callback: 16-bit port input.  On non-x86 hosts, odd-port
 * (misaligned) accesses are split into two byte reads; note the else
 * pairs with the iodev_read_2() call after #endif.
 */
static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
	uint16_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_1(port + 1) << 8;
	} else
#endif
	val = iodev_read_2(port);

	return (val);
}
462 
/*
 * Emulator callback: 32-bit port input.  On non-x86 hosts, misaligned
 * accesses are decomposed into naturally aligned byte/word reads; the
 * final else pairs with the iodev_read_4() call after #endif.
 */
static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
	uint32_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_2(port + 1) << 8;
		val |= iodev_read_1(port + 3) << 24;
	} else if ((port & 2) != 0) {
		val = iodev_read_2(port);
		val |= iodev_read_2(port + 2) << 16;
	} else
#endif
	val = iodev_read_4(port);

	return (val);
}
485 
/*
 * Emulator callback: byte port output.  On non-x86 hosts, writes to
 * the APM scratch register and POST status ports are discarded.
 */
static void
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port == 0xb2) /* APM scratch register */
		return;
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;
#endif

	iodev_write_1(port, val);
}
499 
/*
 * Emulator callback: 16-bit port output.  On non-x86 hosts, odd-port
 * (misaligned) accesses are split into two byte writes; the else
 * pairs with the iodev_write_2() call after #endif.
 */
static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_1(port + 1, val >> 8);
	} else
#endif
	iodev_write_2(port, val);
}
515 
/*
 * Emulator callback: 32-bit port output.  On non-x86 hosts, misaligned
 * accesses are decomposed into naturally aligned byte/word writes; the
 * final else pairs with the iodev_write_4() call after #endif.
 */
static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_2(port + 1, val >> 8);
		iodev_write_1(port + 3, val >> 24);
	} else if ((port & 2) != 0) {
		iodev_write_2(port, val);
		iodev_write_2(port + 2, val >> 16);
	} else
#endif
	iodev_write_4(port, val);
}
535 
/*
 * Default emulated software-interrupt dispatcher: mimic the CPU's INT
 * behavior by pushing IP, CS and FLAGS onto the emulated stack (which
 * lives in x86bios_seg), loading CS:IP from the IVT entry for intno,
 * and clearing the interrupt and trap flags.
 */
static void
x86bios_emu_get_intr(struct x86emu *emu, int intno)
{
	uint16_t *sp;
	uint32_t iv;

	emu->x86.R_SP -= 6;

	/* Stack layout (ascending): IP, CS, FLAGS -- as a real INT pushes. */
	sp = (uint16_t *)((vm_offset_t)x86bios_seg + emu->x86.R_SP);
	sp[0] = htole16(emu->x86.R_IP);
	sp[1] = htole16(emu->x86.R_CS);
	sp[2] = htole16(emu->x86.R_FLG);

	iv = x86bios_get_intr(intno);
	emu->x86.R_IP = iv & 0xffff;
	emu->x86.R_CS = (iv >> 16) & 0xffff;
	emu->x86.R_FLG &= ~(F_IF | F_TF);
}
554 
/*
 * Allocate wired, physically contiguous memory in the emulated RAM
 * window (between RAM_BASE and the start of the ROM area) and enter
 * it into the emulator's page map.  The physical address is returned
 * via *offset; returns NULL on bad arguments or allocation failure.
 */
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
	    x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}
573 
/*
 * Release memory obtained from x86bios_alloc(): clear its entries in
 * the emulator page map and return the memory to the system.  Silently
 * ignores addresses that could not have come from x86bios_alloc().
 */
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	/* Must lie in the RAM window and be x86bios-page aligned. */
	if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
	    paddr % X86BIOS_PAGE_SIZE != 0)
		return;
	mtx_lock(&x86bios_lock);
	bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
	    sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}
591 
/*
 * Reset a register set and point SS:SP at the top of the scratch
 * stack segment (x86bios_seg).  X86BIOS_R_SS/_SP alias padding fields
 * of struct x86emu_regs (see the #defines above).
 */
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
	regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
	regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
}
600 
/*
 * Execute a far call to seg:off under the x86 emulator.  Registers are
 * copied in and out of the shared emulator state while x86bios_lock is
 * held; spinlocks are entered around execution since the emulated code
 * performs raw port I/O that must not be preempted.
 */
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	mtx_lock(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	spinlock_enter();
	x86emu_exec_call(&x86bios_emu, seg, off);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_call) {
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
		/* Report any guest memory fault recorded during the call. */
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}
625 
/*
 * Fetch the seg:off handler address for interrupt vector intno from
 * the emulated IVT (little-endian, 4 bytes per vector).
 */
uint32_t
x86bios_get_intr(int intno)
{

	return (le32toh(*((uint32_t *)x86bios_ivt + intno)));
}
632 
/*
 * Store a new seg:off handler address for interrupt vector intno into
 * the emulated IVT.
 */
void
x86bios_set_intr(int intno, uint32_t saddr)
{

	*((uint32_t *)x86bios_ivt + intno) = htole32(saddr);
}
639 
/*
 * Execute software interrupt intno under the x86 emulator.  Vectors
 * outside 0-255 are rejected.  Registers are copied in and out of the
 * shared emulator state while x86bios_lock is held; spinlocks bracket
 * execution since the emulated code performs raw port I/O.
 */
void
x86bios_intr(struct x86regs *regs, int intno)
{

	if (intno < 0 || intno > 255)
		return;

	if (x86bios_trace_int)
		X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

	mtx_lock(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	spinlock_enter();
	x86emu_exec_intr(&x86bios_emu, intno);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_int) {
		X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
		/* Report any guest memory fault recorded during the call. */
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}
667 
/*
 * Translate an emulated physical offset to a kernel virtual address
 * via the emulator page map; returns NULL for unmapped addresses.
 */
void *
x86bios_offset(uint32_t offset)
{

	return (x86bios_get_pages(offset, 1));
}
674 
/*
 * Tear down everything x86bios_map_mem() set up.  Safe to call on a
 * partially initialized state (used on the map_mem failure path):
 * NULL members are skipped.  Note each #ifdef arm below is the body
 * of the preceding if.
 */
static __inline void
x86bios_unmap_mem(void)
{

	free(x86bios_map, M_DEVBUF);
	if (x86bios_ivt != NULL)
#ifdef X86BIOS_NATIVE_ARCH
		pmap_unmapbios((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
#else
		free(x86bios_ivt, M_DEVBUF);
#endif
	if (x86bios_rom != NULL)
		pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
	if (x86bios_seg != NULL)
		contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
}
691 
/*
 * Build the emulated low-memory layout: the page map, the IVT/BDA
 * (mapped from the real BIOS on x86, zero-filled otherwise), the
 * ROM/EBDA area, and a scratch stack segment.  Returns 0 on success,
 * 1 on failure (after undoing partial setup).
 */
static __inline int
x86bios_map_mem(void)
{

	x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
	    M_WAITOK | M_ZERO);

#ifdef X86BIOS_NATIVE_ARCH
	x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

	/* Probe EBDA via BDA. */
	x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
	x86bios_rom_phys = x86bios_rom_phys << 4;
	/* Accept the EBDA only if it sits sanely just below the ROM area. */
	if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
		x86bios_rom_phys =
		    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
	else
#else
	x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
#endif

	/*
	 * Careful: on X86BIOS_NATIVE_ARCH this assignment is the body of
	 * the dangling else above (no usable EBDA); otherwise it always
	 * executes.
	 */
	x86bios_rom_phys = X86BIOS_ROM_BASE;
	x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
	if (x86bios_rom == NULL)
		goto fail;
#ifdef X86BIOS_NATIVE_ARCH
	/* Change attribute for EBDA. */
	if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    pmap_change_attr((vm_offset_t)x86bios_rom,
	    X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
		goto fail;
#endif

	x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
	    X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	x86bios_seg_phys = vtophys(x86bios_seg);

	x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
	    X86BIOS_IVT_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
	    X86BIOS_ROM_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
	    X86BIOS_SEG_SIZE);

	/* NOTE(review): %jx with vm_paddr_t assumes it matches uintmax_t
	 * in width on supported platforms -- strictly it should be cast. */
	if (bootverbose) {
		printf("x86bios:  IVT 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_IVT_BASE,
		    (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
		    x86bios_ivt);
		printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
		    x86bios_seg_phys,
		    (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
		    x86bios_seg);
		if (x86bios_rom_phys < X86BIOS_ROM_BASE)
			printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
			    x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
			    x86bios_rom);
		printf("x86bios:  ROM 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_ROM_BASE,
		    (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
		    (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
	}

	return (0);

fail:
	x86bios_unmap_mem();

	return (1);
}
763 
764 static int
765 x86bios_init(void)
766 {
767 	int i;
768 
769 	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
770 
771 	if (x86bios_map_mem() != 0)
772 		return (ENOMEM);
773 
774 	bzero(&x86bios_emu, sizeof(x86bios_emu));
775 
776 	x86bios_emu.emu_rdb = x86bios_emu_rdb;
777 	x86bios_emu.emu_rdw = x86bios_emu_rdw;
778 	x86bios_emu.emu_rdl = x86bios_emu_rdl;
779 	x86bios_emu.emu_wrb = x86bios_emu_wrb;
780 	x86bios_emu.emu_wrw = x86bios_emu_wrw;
781 	x86bios_emu.emu_wrl = x86bios_emu_wrl;
782 
783 	x86bios_emu.emu_inb = x86bios_emu_inb;
784 	x86bios_emu.emu_inw = x86bios_emu_inw;
785 	x86bios_emu.emu_inl = x86bios_emu_inl;
786 	x86bios_emu.emu_outb = x86bios_emu_outb;
787 	x86bios_emu.emu_outw = x86bios_emu_outw;
788 	x86bios_emu.emu_outl = x86bios_emu_outl;
789 
790 	for (i = 0; i < 256; i++)
791 		x86bios_emu._x86emu_intrTab[i] = x86bios_emu_get_intr;
792 
793 	return (0);
794 }
795 
/*
 * Backend teardown for the emulator case: release all mapped memory
 * and destroy the lock.
 */
static int
x86bios_uninit(void)
{

	x86bios_unmap_mem();
	mtx_destroy(&x86bios_lock);

	return (0);
}
805 
806 #endif
807 
/*
 * Return a pointer to an option ROM at the given physical offset, or
 * NULL if no valid ROM is present: the image must start with the
 * 0x55 0xaa signature and have a jmp (0xe9/0xeb) opcode at byte 3.
 */
void *
x86bios_get_orm(uint32_t offset)
{
	uint8_t *p;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_offset(offset);
	if (p == NULL || p[0] != 0x55 || p[1] != 0xaa ||
	    (p[3] != 0xe9 && p[3] != 0xeb))
		return (NULL);

	return (p);
}
821 
/*
 * Check whether the option ROM at the given offset belongs to the PCI
 * device dev, by locating the ROM's "PCIR" data structure (pointer at
 * ROM offset 0x18) and comparing its vendor, device and class codes
 * against the device.  Returns 1 on a match, 0 otherwise.
 */
int
x86bios_match_device(uint32_t offset, device_t dev)
{
	uint8_t *p;
	uint16_t device, vendor;
	uint8_t class, progif, subclass;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_get_orm(offset);
	if (p == NULL)
		return (0);

	/* Does it contain PCI data structure? */
	p += le16toh(*(uint16_t *)(p + 0x18));
	/* Require "PCIR" magic, a sane structure length, and image 0. */
	if (bcmp(p, "PCIR", 4) != 0 ||
	    le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
		return (0);

	/* Does it match the vendor, device, and classcode? */
	vendor = le16toh(*(uint16_t *)(p + 0x04));
	device = le16toh(*(uint16_t *)(p + 0x06));
	progif = *(p + 0x0d);
	subclass = *(p + 0x0e);
	class = *(p + 0x0f);
	if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
	    class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
	    progif != pci_get_progif(dev))
		return (0);

	return (1);
}
853 
854 static int
855 x86bios_modevent(module_t mod __unused, int type, void *data __unused)
856 {
857 
858 	switch (type) {
859 	case MOD_LOAD:
860 		return (x86bios_init());
861 	case MOD_UNLOAD:
862 		return (x86bios_uninit());
863 	default:
864 		return (ENOTSUP);
865 	}
866 }
867 
/* Module registration; initialized early, at SI_SUB_CPU. */
static moduledata_t x86bios_mod = {
	"x86bios",
	x86bios_modevent,
	NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);
876