/*-
 * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_x86bios.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <contrib/x86emu/x86emu.h>
#include <contrib/x86emu/x86emu_regs.h>
#include <compat/x86bios/x86bios.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifdef __amd64__
#define	X86BIOS_NATIVE_ARCH
#endif
#ifdef __i386__
#define	X86BIOS_NATIVE_VM86
#endif

#define	X86BIOS_MEM_SIZE	0x00100000	/* 1M */

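/*
 * Dump the real-mode calling registers before and after a call or interrupt
 * when tracing is enabled via the debug.x86bios.* sysctls below.
 */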
#define	X86BIOS_TRACE(h, n, r)	do {					\
	printf(__STRING(h)						\
	    " (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
	    (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX,		\
	    (r)->R_ES, (r)->R_DI);					\
} while (0)

static struct mtx x86bios_lock;

static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL,
    "x86bios debugging");
static int x86bios_trace_call;
TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
    "Trace far function calls");
static int x86bios_trace_int;
TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
    "Trace software interrupt handlers");

#ifdef X86BIOS_NATIVE_VM86

#include <machine/vm86.h>
#include <machine/vmparam.h>
#include <machine/pc/bios.h>

struct vm86context x86bios_vmc;

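/*
 * Convert between the x86emu register layout used by the x86bios API and
 * the vm86 trapframe consumed by vm86_datacall().  On i386 the BIOS code
 * runs natively in vm86 mode, so only the general-purpose and segment
 * registers need to be marshalled.
 */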
static void
x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
{

	vmf->vmf_ds = regs->R_DS;
	vmf->vmf_es = regs->R_ES;
	vmf->vmf_ax = regs->R_AX;
	vmf->vmf_bx = regs->R_BX;
	vmf->vmf_cx = regs->R_CX;
	vmf->vmf_dx = regs->R_DX;
	vmf->vmf_bp = regs->R_BP;
	vmf->vmf_si = regs->R_SI;
	vmf->vmf_di = regs->R_DI;
}

static void
x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
{

	regs->R_DS = vmf->vmf_ds;
	regs->R_ES = vmf->vmf_es;
	regs->R_FLG = vmf->vmf_flags;
	regs->R_AX = vmf->vmf_ax;
	regs->R_BX = vmf->vmf_bx;
	regs->R_CX = vmf->vmf_cx;
	regs->R_DX = vmf->vmf_dx;
	regs->R_BP = vmf->vmf_bp;
	regs->R_SI = vmf->vmf_si;
	regs->R_DI = vmf->vmf_di;
}

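/*
 * Allocate wired memory below 1MB and register each page with the vm86
 * context so that real-mode BIOS code can address it directly.
 */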
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;
	int i;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
	    PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		for (i = 0; i < atop(round_page(size)); i++)
			vm86_addpage(&x86bios_vmc, atop(*offset) + i,
			    (vm_offset_t)vaddr + ptoa(i));
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}

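/*
 * Release memory obtained from x86bios_alloc() and drop its pages from the
 * vm86 page map, shrinking the map when the freed range was at its tail.
 */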
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;
	int i, nfree;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
		return;
	mtx_lock(&x86bios_lock);
	for (i = 0; i < x86bios_vmc.npages; i++)
		if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
			break;
	if (i >= x86bios_vmc.npages) {
		mtx_unlock(&x86bios_lock);
		return;
	}
	nfree = atop(round_page(size));
	bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
	if (i + nfree == x86bios_vmc.npages) {
		x86bios_vmc.npages -= nfree;
		while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
			x86bios_vmc.npages--;
	}
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}

void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
}

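/*
 * Execute a far call to seg:off in vm86 mode, marshalling the caller's
 * registers through a vm86 trapframe.
 */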
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{
	struct vm86frame vmf;

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	vmf.vmf_cs = seg;
	vmf.vmf_ip = off;
	mtx_lock(&x86bios_lock);
	vm86_datacall(-1, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_call)
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
}

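/*
 * Read or write a real-mode interrupt vector in the interrupt vector table
 * at physical address 0.
 */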
uint32_t
x86bios_get_intr(int intno)
{

	return (readl(BIOS_PADDRTOVADDR(intno * 4)));
}

void
x86bios_set_intr(int intno, uint32_t saddr)
{

	writel(BIOS_PADDRTOVADDR(intno * 4), saddr);
}

void
x86bios_intr(struct x86regs *regs, int intno)
{
	struct vm86frame vmf;

	if (x86bios_trace_int)
		X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	mtx_lock(&x86bios_lock);
	vm86_datacall(intno, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_int)
		X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
}

void *
x86bios_offset(uint32_t offset)
{
	vm_offset_t addr;

	addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
	    X86BIOS_PHYSTOOFF(offset));
	if (addr == 0)
		addr = BIOS_PADDRTOVADDR(offset);

	return ((void *)addr);
}

static int
x86bios_init(void)
{

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
	bzero(&x86bios_vmc, sizeof(x86bios_vmc));

	return (0);
}

static int
x86bios_uninit(void)
{

	mtx_destroy(&x86bios_lock);

	return (0);
}

#else

#include <machine/iodev.h>

#define	X86BIOS_PAGE_SIZE	0x00001000	/* 4K */

#define	X86BIOS_IVT_SIZE	0x00000500	/* 1K + 256 (BDA) */

#define	X86BIOS_IVT_BASE	0x00000000
#define	X86BIOS_RAM_BASE	0x00001000
#define	X86BIOS_ROM_BASE	0x000a0000

#define	X86BIOS_ROM_SIZE	(X86BIOS_MEM_SIZE - x86bios_rom_phys)
#define	X86BIOS_SEG_SIZE	X86BIOS_PAGE_SIZE

#define	X86BIOS_PAGES		(X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)

#define	X86BIOS_R_SS		_pad2
#define	X86BIOS_R_SP		_pad3.I16_reg.x_reg

static struct x86emu x86bios_emu;

static void *x86bios_ivt;
static void *x86bios_rom;
static void *x86bios_seg;

static vm_offset_t *x86bios_map;

static vm_paddr_t x86bios_rom_phys;
static vm_paddr_t x86bios_seg_phys;

static int x86bios_fault;
static uint32_t x86bios_fault_addr;
static uint16_t x86bios_fault_cs;
static uint16_t x86bios_fault_ip;

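/*
 * Record the faulting address and CS:IP and abort emulation; the fault is
 * reported by the tracing code in x86bios_call() and x86bios_intr().
 */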
static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

	x86bios_fault = 1;
	x86bios_fault_addr = addr;
	x86bios_fault_cs = emu->x86.R_CS;
	x86bios_fault_ip = emu->x86.R_IP;
	x86emu_halt_sys(emu);
}

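/*
 * Translate an emulated physical address into the kernel virtual address
 * backing that page, or NULL if the page is not mapped.  Accesses just
 * above 1MB wrap around to the start of memory, mimicking real-mode
 * address wrap-around.
 */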
static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
	vm_offset_t addr;

	if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
		return (NULL);

	if (offset >= X86BIOS_MEM_SIZE)
		offset -= X86BIOS_MEM_SIZE;
	addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
	if (addr != 0)
		addr += offset % X86BIOS_PAGE_SIZE;

	return ((void *)addr);
}

static void
x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
{
	int i, j;

	for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
	    j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
		x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
}

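/*
 * Memory access callbacks for x86emu.  Each handler translates the emulated
 * address and raises a fault for unmapped pages; the word and long variants
 * also convert byte order and cope with unaligned accesses on
 * strict-alignment architectures.
 */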
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	return (*va);
}

static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		return (le16dec(va));
	else
#endif
	return (le16toh(*va));
}

static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		return (le32dec(va));
	else
#endif
	return (le32toh(*va));
}

static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	*va = val;
}

static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 1) != 0)
		le16enc(va, val);
	else
#endif
	*va = htole16(val);
}

static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
	*va = htole32(val);
}

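/*
 * Port I/O callbacks for x86emu.  When the host is not native x86, a few
 * well-known ports (the APM scratch register and the POST status registers)
 * are ignored and unaligned accesses are split into smaller transfers.
 */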
static uint8_t
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port == 0xb2) /* APM scratch register */
		return (0);
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);
#endif

	return (iodev_read_1(port));
}

static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
	uint16_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_1(port + 1) << 8;
	} else
#endif
	val = iodev_read_2(port);

	return (val);
}

static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
	uint32_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_2(port + 1) << 8;
		val |= iodev_read_1(port + 3) << 24;
	} else if ((port & 2) != 0) {
		val = iodev_read_2(port);
		val |= iodev_read_2(port + 2) << 16;
	} else
#endif
	val = iodev_read_4(port);

	return (val);
}

static void
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port == 0xb2) /* APM scratch register */
		return;
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;
#endif

	iodev_write_1(port, val);
}

static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_1(port + 1, val >> 8);
	} else
#endif
	iodev_write_2(port, val);
}

static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_2(port + 1, val >> 8);
		iodev_write_1(port + 3, val >> 24);
	} else if ((port & 2) != 0) {
		iodev_write_2(port, val);
		iodev_write_2(port + 2, val >> 16);
	} else
#endif
	iodev_write_4(port, val);
}

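/*
 * Allocate wired memory in the emulated low-memory window, between the
 * IVT/BDA page and the EBDA/ROM area, and enter its pages into the page map.
 */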
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
	    x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}

void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
	    paddr % X86BIOS_PAGE_SIZE != 0)
		return;
	mtx_lock(&x86bios_lock);
	bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
	    sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
	mtx_unlock(&x86bios_lock);
	contigfree(addr, size, M_DEVBUF);
}

void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
	regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
	regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
}

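/*
 * Execute a far call to seg:off on the x86emu soft CPU.  Interrupts are
 * disabled with spinlock_enter() so the emulated BIOS code is not preempted
 * while it touches the hardware.
 */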
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	mtx_lock(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	spinlock_enter();
	x86emu_exec_call(&x86bios_emu, seg, off);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_call) {
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

uint32_t
x86bios_get_intr(int intno)
{

	return (le32toh(*((uint32_t *)x86bios_ivt + intno)));
}

void
x86bios_set_intr(int intno, uint32_t saddr)
{

	*((uint32_t *)x86bios_ivt + intno) = htole32(saddr);
}

void
x86bios_intr(struct x86regs *regs, int intno)
{

	if (intno < 0 || intno > 255)
		return;

	if (x86bios_trace_int)
		X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

	mtx_lock(&x86bios_lock);
	memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	spinlock_enter();
	x86emu_exec_intr(&x86bios_emu, intno);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_int) {
		X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}

void *
x86bios_offset(uint32_t offset)
{

	return (x86bios_get_pages(offset, 1));
}

static __inline void
x86bios_unmap_mem(void)
{

	free(x86bios_map, M_DEVBUF);
	if (x86bios_ivt != NULL)
#ifdef X86BIOS_NATIVE_ARCH
		pmap_unmapbios((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
#else
		free(x86bios_ivt, M_DEVBUF);
#endif
	if (x86bios_rom != NULL)
		pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
	if (x86bios_seg != NULL)
		contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
}

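/*
 * Build the emulator's view of low memory: map the real interrupt vector
 * table (or a zeroed copy on non-x86 hosts), the EBDA/option-ROM window,
 * and a scratch stack segment, and enter them into the page map.
 */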
static __inline int
x86bios_map_mem(void)
{

	x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
	    M_WAITOK | M_ZERO);

#ifdef X86BIOS_NATIVE_ARCH
	x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

	/* Probe EBDA via BDA. */
	x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
	x86bios_rom_phys = x86bios_rom_phys << 4;
	if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
		x86bios_rom_phys =
		    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
	else
#else
	x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
#endif

	x86bios_rom_phys = X86BIOS_ROM_BASE;
	x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
	if (x86bios_rom == NULL)
		goto fail;
#ifdef X86BIOS_NATIVE_ARCH
	/* Change attribute for EBDA. */
	if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    pmap_change_attr((vm_offset_t)x86bios_rom,
	    X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
		goto fail;
#endif

	x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
	    X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	x86bios_seg_phys = vtophys(x86bios_seg);

	x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
	    X86BIOS_IVT_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
	    X86BIOS_ROM_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
	    X86BIOS_SEG_SIZE);

	if (bootverbose) {
		printf("x86bios:  IVT 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_IVT_BASE,
		    (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
		    x86bios_ivt);
		printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
		    x86bios_seg_phys,
		    (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
		    x86bios_seg);
		if (x86bios_rom_phys < X86BIOS_ROM_BASE)
			printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
			    x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
			    x86bios_rom);
		printf("x86bios:  ROM 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_ROM_BASE,
		    (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
		    (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
	}

	return (0);

fail:
	x86bios_unmap_mem();

	return (1);
}

static int
x86bios_init(void)
{

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);

	if (x86bios_map_mem() != 0)
		return (ENOMEM);

	bzero(&x86bios_emu, sizeof(x86bios_emu));

	x86bios_emu.emu_rdb = x86bios_emu_rdb;
	x86bios_emu.emu_rdw = x86bios_emu_rdw;
	x86bios_emu.emu_rdl = x86bios_emu_rdl;
	x86bios_emu.emu_wrb = x86bios_emu_wrb;
	x86bios_emu.emu_wrw = x86bios_emu_wrw;
	x86bios_emu.emu_wrl = x86bios_emu_wrl;

	x86bios_emu.emu_inb = x86bios_emu_inb;
	x86bios_emu.emu_inw = x86bios_emu_inw;
	x86bios_emu.emu_inl = x86bios_emu_inl;
	x86bios_emu.emu_outb = x86bios_emu_outb;
	x86bios_emu.emu_outw = x86bios_emu_outw;
	x86bios_emu.emu_outl = x86bios_emu_outl;

	return (0);
}

static int
x86bios_uninit(void)
{

	x86bios_unmap_mem();
	mtx_destroy(&x86bios_lock);

	return (0);
}

#endif

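/*
 * Return a pointer to the option ROM image at 'offset' if it carries a
 * valid x86 option-ROM header (the 0x55 0xaa signature and a jmp opcode at
 * the start of its init code), otherwise NULL.
 */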
void *
x86bios_get_orm(uint32_t offset)
{
	uint8_t *p;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_offset(offset);
	if (p == NULL || p[0] != 0x55 || p[1] != 0xaa ||
	    (p[3] != 0xe9 && p[3] != 0xeb))
		return (NULL);

	return (p);
}

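/*
 * Check whether the option ROM at 'offset' belongs to the given PCI device
 * by comparing the vendor, device, and class codes in its PCI data
 * structure.
 */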
int
x86bios_match_device(uint32_t offset, device_t dev)
{
	uint8_t *p;
	uint16_t device, vendor;
	uint8_t class, progif, subclass;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_get_orm(offset);
	if (p == NULL)
		return (0);

	/* Does it contain a PCI data structure? */
	p += le16toh(*(uint16_t *)(p + 0x18));
	if (bcmp(p, "PCIR", 4) != 0 ||
	    le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
		return (0);

	/* Does it match the vendor, device, and classcode? */
	vendor = le16toh(*(uint16_t *)(p + 0x04));
	device = le16toh(*(uint16_t *)(p + 0x06));
	progif = *(p + 0x0d);
	subclass = *(p + 0x0e);
	class = *(p + 0x0f);
	if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
	    class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
	    progif != pci_get_progif(dev))
		return (0);

	return (1);
}

static int
x86bios_modevent(module_t mod __unused, int type, void *data __unused)
{

	switch (type) {
	case MOD_LOAD:
		return (x86bios_init());
	case MOD_UNLOAD:
		return (x86bios_uninit());
	default:
		return (ENOTSUP);
	}
}

static moduledata_t x86bios_mod = {
	"x86bios",
	x86bios_modevent,
	NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);