xref: /freebsd/sys/dev/pci/pci.c (revision 5bd73b51076b5cb5a2c9810f76c1d7ed20c4460e)
1 /*-
2  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3  * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4  * Copyright (c) 2000, BSDi
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_bus.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
40 #include <sys/conf.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
45 
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_extern.h>
49 
50 #include <sys/bus.h>
51 #include <machine/bus.h>
52 #include <sys/rman.h>
53 #include <machine/resource.h>
54 #include <machine/stdarg.h>
55 
56 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
57 #include <machine/intr_machdep.h>
58 #endif
59 
60 #include <sys/pciio.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pci_private.h>
64 
65 #include <dev/usb/controller/xhcireg.h>
66 #include <dev/usb/controller/ehcireg.h>
67 #include <dev/usb/controller/ohcireg.h>
68 #include <dev/usb/controller/uhcireg.h>
69 
70 #include "pcib_if.h"
71 #include "pci_if.h"
72 
/*
 * True iff config register 'reg' is the expansion-ROM BAR for the
 * header type recorded in 'cfg'; type 0 devices and type 1 bridges
 * keep the ROM BAR at different offsets.
 *
 * Fix: parenthesize the 'reg' macro parameter so expression
 * arguments (e.g. "base + off") compare correctly.
 */
#define	PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && (reg) == PCIR_BIOS) || \
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && (reg) == PCIR_BIOS_1))
76 
77 static int		pci_has_quirk(uint32_t devid, int quirk);
78 static pci_addr_t	pci_mapbase(uint64_t mapreg);
79 static const char	*pci_maptype(uint64_t mapreg);
80 static int		pci_mapsize(uint64_t testval);
81 static int		pci_maprange(uint64_t mapreg);
82 static pci_addr_t	pci_rombase(uint64_t mapreg);
83 static int		pci_romsize(uint64_t testval);
84 static void		pci_fixancient(pcicfgregs *cfg);
85 static int		pci_printf(pcicfgregs *cfg, const char *fmt, ...);
86 
87 static int		pci_porten(device_t dev);
88 static int		pci_memen(device_t dev);
89 static void		pci_assign_interrupt(device_t bus, device_t dev,
90 			    int force_route);
91 static int		pci_add_map(device_t bus, device_t dev, int reg,
92 			    struct resource_list *rl, int force, int prefetch);
93 static int		pci_probe(device_t dev);
94 static int		pci_attach(device_t dev);
95 #ifdef PCI_RES_BUS
96 static int		pci_detach(device_t dev);
97 #endif
98 static void		pci_load_vendor_data(void);
99 static int		pci_describe_parse_line(char **ptr, int *vendor,
100 			    int *device, char **desc);
101 static char		*pci_describe_device(device_t dev);
102 static int		pci_modevent(module_t mod, int what, void *arg);
103 static void		pci_hdrtypedata(device_t pcib, int b, int s, int f,
104 			    pcicfgregs *cfg);
105 static void		pci_read_cap(device_t pcib, pcicfgregs *cfg);
106 static int		pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
107 			    int reg, uint32_t *data);
108 #if 0
109 static int		pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
110 			    int reg, uint32_t data);
111 #endif
112 static void		pci_read_vpd(device_t pcib, pcicfgregs *cfg);
113 static void		pci_mask_msix(device_t dev, u_int index);
114 static void		pci_unmask_msix(device_t dev, u_int index);
115 static int		pci_msi_blacklisted(void);
116 static int		pci_msix_blacklisted(void);
117 static void		pci_resume_msi(device_t dev);
118 static void		pci_resume_msix(device_t dev);
119 static int		pci_remap_intr_method(device_t bus, device_t dev,
120 			    u_int irq);
121 
122 static uint16_t		pci_get_rid_method(device_t dev, device_t child);
123 
/*
 * Kernel-object method table for the PCI bus driver: maps the generic
 * device, bus and PCI-specific interface methods onto their pci_*
 * implementations declared above or in pci_private.h.
 */
static device_method_t pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pci_probe),
	DEVMETHOD(device_attach,	pci_attach),
#ifdef PCI_RES_BUS
	DEVMETHOD(device_detach,	pci_detach),
#else
	DEVMETHOD(device_detach,	bus_generic_detach),
#endif
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	pci_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	pci_print_child),
	DEVMETHOD(bus_probe_nomatch,	pci_probe_nomatch),
	DEVMETHOD(bus_read_ivar,	pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	pci_write_ivar),
	DEVMETHOD(bus_driver_added,	pci_driver_added),
	DEVMETHOD(bus_setup_intr,	pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	pci_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,	pci_get_dma_tag),
	DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,	pci_delete_resource),
	DEVMETHOD(bus_alloc_resource,	pci_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
	DEVMETHOD(bus_release_resource,	pci_release_resource),
	DEVMETHOD(bus_activate_resource, pci_activate_resource),
	DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
	DEVMETHOD(bus_child_detached,	pci_child_detached),
	DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
	DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
	DEVMETHOD(bus_remap_intr,	pci_remap_intr_method),
	DEVMETHOD(bus_suspend_child,	pci_suspend_child),
	DEVMETHOD(bus_resume_child,	pci_resume_child),

	/* PCI interface */
	DEVMETHOD(pci_read_config,	pci_read_config_method),
	DEVMETHOD(pci_write_config,	pci_write_config_method),
	DEVMETHOD(pci_enable_busmaster,	pci_enable_busmaster_method),
	DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
	DEVMETHOD(pci_enable_io,	pci_enable_io_method),
	DEVMETHOD(pci_disable_io,	pci_disable_io_method),
	DEVMETHOD(pci_get_vpd_ident,	pci_get_vpd_ident_method),
	DEVMETHOD(pci_get_vpd_readonly,	pci_get_vpd_readonly_method),
	DEVMETHOD(pci_get_powerstate,	pci_get_powerstate_method),
	DEVMETHOD(pci_set_powerstate,	pci_set_powerstate_method),
	DEVMETHOD(pci_assign_interrupt,	pci_assign_interrupt_method),
	DEVMETHOD(pci_find_cap,		pci_find_cap_method),
	DEVMETHOD(pci_find_extcap,	pci_find_extcap_method),
	DEVMETHOD(pci_find_htcap,	pci_find_htcap_method),
	DEVMETHOD(pci_alloc_msi,	pci_alloc_msi_method),
	DEVMETHOD(pci_alloc_msix,	pci_alloc_msix_method),
	DEVMETHOD(pci_enable_msi,	pci_enable_msi_method),
	DEVMETHOD(pci_enable_msix,	pci_enable_msix_method),
	DEVMETHOD(pci_disable_msi,	pci_disable_msi_method),
	DEVMETHOD(pci_remap_msix,	pci_remap_msix_method),
	DEVMETHOD(pci_release_msi,	pci_release_msi_method),
	DEVMETHOD(pci_msi_count,	pci_msi_count_method),
	DEVMETHOD(pci_msix_count,	pci_msix_count_method),
	DEVMETHOD(pci_get_rid,		pci_get_rid_method),
	DEVMETHOD(pci_child_added,	pci_child_added_method),

	DEVMETHOD_END
};
192 
193 DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
194 
195 static devclass_t pci_devclass;
196 DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
197 MODULE_VERSION(pci, 1);
198 
199 static char	*pci_vendordata;
200 static size_t	pci_vendordata_size;
201 
/*
 * One entry of the device quirk table below; matched against a
 * function's combined vendor/device ID.
 */
struct pci_quirk {
	uint32_t devid;	/* Vendor/device of the card */
	int	type;
#define	PCI_QUIRK_MAP_REG	1 /* PCI map register in weird place */
#define	PCI_QUIRK_DISABLE_MSI	2 /* Neither MSI nor MSI-X work */
#define	PCI_QUIRK_ENABLE_MSI_VM	3 /* Older chipset in VM where MSI works */
#define	PCI_QUIRK_UNMAP_REG	4 /* Ignore PCI map register */
#define	PCI_QUIRK_DISABLE_MSIX	5 /* MSI-X doesn't work */
#define	PCI_QUIRK_MSI_INTX_BUG	6 /* PCIM_CMD_INTxDIS disables MSI */
	int	arg1;	/* quirk-type specific argument (e.g. register offset) */
	int	arg2;	/* second quirk-type specific argument */
};
214 
/*
 * Table of known-broken or special-cased devices, keyed by the
 * combined device/vendor ID and terminated by an all-zero entry.
 * Consulted via pci_has_quirk() and friends.
 */
static const struct pci_quirk pci_quirks[] = {
	/* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
	{ 0x71138086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	{ 0x719b8086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	/* As does the Serverworks OSB4 (the SMBus mapping register) */
	{ 0x02001166, PCI_QUIRK_MAP_REG,	0x90,	 0 },

	/*
	 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
	 * or the CMIC-SL (AKA ServerWorks GC_LE).
	 */
	{ 0x00141166, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x00171166, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work on earlier Intel chipsets including
	 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
	 */
	{ 0x25408086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x254c8086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25508086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25608086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25708086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25788086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x35808086, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
	 * bridge.
	 */
	{ 0x74501022, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI-X allocation doesn't work properly for devices passed through
	 * by VMware up to at least ESXi 5.1.
	 */
	{ 0x079015ad, PCI_QUIRK_DISABLE_MSIX,	0,	0 }, /* PCI/PCI-X */
	{ 0x07a015ad, PCI_QUIRK_DISABLE_MSIX,	0,	0 }, /* PCIe */

	/*
	 * Some virtualization environments emulate an older chipset
	 * but support MSI just fine.  QEMU uses the Intel 82440.
	 */
	{ 0x12378086, PCI_QUIRK_ENABLE_MSI_VM,	0,	0 },

	/*
	 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
	 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
	 * It prevents us from attaching hpet(4) when the bit is unset.
	 * Note this quirk only affects SB600 revision A13 and earlier.
	 * For SB600 A21 and later, firmware must set the bit to hide it.
	 * For SB700 and later, it is unused and hardcoded to zero.
	 */
	{ 0x43851002, PCI_QUIRK_UNMAP_REG,	0x14,	0 },

	/*
	 * Atheros AR8161/AR8162/E2200 ethernet controller has a bug that
	 * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the
	 * command register is set.
	 */
	{ 0x10911969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0xE0911969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0x10901969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },

	{ 0 }	/* terminator: devid 0 ends the table */
};
281 
282 /* map register information */
283 #define	PCI_MAPMEM	0x01	/* memory map */
284 #define	PCI_MAPMEMP	0x02	/* prefetchable memory map */
285 #define	PCI_MAPPORT	0x04	/* port map */
286 
287 struct devlist pci_devq;
288 uint32_t pci_generation;
289 uint32_t pci_numdevs = 0;
290 static int pcie_chipset, pcix_chipset;
291 
292 /* sysctl vars */
293 SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");
294 
295 static int pci_enable_io_modes = 1;
296 SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
297     &pci_enable_io_modes, 1,
298     "Enable I/O and memory bits in the config register.  Some BIOSes do not\n\
299 enable these bits correctly.  We'd like to do this all the time, but there\n\
300 are some peripherals that this causes problems with.");
301 
302 static int pci_do_realloc_bars = 0;
303 SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
304     &pci_do_realloc_bars, 0,
305     "Attempt to allocate a new range for any BARs whose original "
306     "firmware-assigned ranges fail to allocate during the initial device scan.");
307 
308 static int pci_do_power_nodriver = 0;
309 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
310     &pci_do_power_nodriver, 0,
311   "Place a function into D3 state when no driver attaches to it.  0 means\n\
312 disable.  1 means conservatively place devices into D3 state.  2 means\n\
313 agressively place devices into D3 state.  3 means put absolutely everything\n\
314 in D3 state.");
315 
316 int pci_do_power_resume = 1;
317 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
318     &pci_do_power_resume, 1,
319   "Transition from D3 -> D0 on resume.");
320 
321 int pci_do_power_suspend = 1;
322 SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
323     &pci_do_power_suspend, 1,
324   "Transition from D0 -> D3 on suspend.");
325 
326 static int pci_do_msi = 1;
327 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
328     "Enable support for MSI interrupts");
329 
330 static int pci_do_msix = 1;
331 SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
332     "Enable support for MSI-X interrupts");
333 
334 static int pci_honor_msi_blacklist = 1;
335 SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
336     &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
337 
338 #if defined(__i386__) || defined(__amd64__)
339 static int pci_usb_takeover = 1;
340 #else
341 static int pci_usb_takeover = 0;
342 #endif
343 SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
344     &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
345 Disable this if you depend on BIOS emulation of USB devices, that is\n\
346 you use USB devices (like keyboard or mouse) but do not load USB drivers");
347 
348 static int pci_clear_bars;
349 SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
350     "Ignore firmware-assigned resources for BARs.");
351 
352 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
353 static int pci_clear_buses;
354 SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
355     "Ignore firmware-assigned bus numbers.");
356 #endif
357 
358 static int pci_enable_ari = 1;
359 SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
360     0, "Enable support for PCIe Alternative RID Interpretation");
361 
362 static int
363 pci_has_quirk(uint32_t devid, int quirk)
364 {
365 	const struct pci_quirk *q;
366 
367 	for (q = &pci_quirks[0]; q->devid; q++) {
368 		if (q->devid == devid && q->type == quirk)
369 			return (1);
370 	}
371 	return (0);
372 }
373 
/* Find a device_t by bus/slot/function in domain 0 */

device_t
pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
{

	/* Convenience wrapper around pci_find_dbsf() for the default domain. */
	return (pci_find_dbsf(0, bus, slot, func));
}
382 
383 /* Find a device_t by domain/bus/slot/function */
384 
385 device_t
386 pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
387 {
388 	struct pci_devinfo *dinfo;
389 
390 	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
391 		if ((dinfo->cfg.domain == domain) &&
392 		    (dinfo->cfg.bus == bus) &&
393 		    (dinfo->cfg.slot == slot) &&
394 		    (dinfo->cfg.func == func)) {
395 			return (dinfo->cfg.dev);
396 		}
397 	}
398 
399 	return (NULL);
400 }
401 
402 /* Find a device_t by vendor/device ID */
403 
404 device_t
405 pci_find_device(uint16_t vendor, uint16_t device)
406 {
407 	struct pci_devinfo *dinfo;
408 
409 	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
410 		if ((dinfo->cfg.vendor == vendor) &&
411 		    (dinfo->cfg.device == device)) {
412 			return (dinfo->cfg.dev);
413 		}
414 	}
415 
416 	return (NULL);
417 }
418 
419 device_t
420 pci_find_class(uint8_t class, uint8_t subclass)
421 {
422 	struct pci_devinfo *dinfo;
423 
424 	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
425 		if (dinfo->cfg.baseclass == class &&
426 		    dinfo->cfg.subclass == subclass) {
427 			return (dinfo->cfg.dev);
428 		}
429 	}
430 
431 	return (NULL);
432 }
433 
434 static int
435 pci_printf(pcicfgregs *cfg, const char *fmt, ...)
436 {
437 	va_list ap;
438 	int retval;
439 
440 	retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
441 	    cfg->func);
442 	va_start(ap, fmt);
443 	retval += vprintf(fmt, ap);
444 	va_end(ap);
445 	return (retval);
446 }
447 
448 /* return base address of memory or port map */
449 
450 static pci_addr_t
451 pci_mapbase(uint64_t mapreg)
452 {
453 
454 	if (PCI_BAR_MEM(mapreg))
455 		return (mapreg & PCIM_BAR_MEM_BASE);
456 	else
457 		return (mapreg & PCIM_BAR_IO_BASE);
458 }
459 
460 /* return map type of memory or port map */
461 
462 static const char *
463 pci_maptype(uint64_t mapreg)
464 {
465 
466 	if (PCI_BAR_IO(mapreg))
467 		return ("I/O Port");
468 	if (mapreg & PCIM_BAR_MEM_PREFETCH)
469 		return ("Prefetchable Memory");
470 	return ("Memory");
471 }
472 
473 /* return log2 of map size decoded for memory or port map */
474 
475 static int
476 pci_mapsize(uint64_t testval)
477 {
478 	int ln2size;
479 
480 	testval = pci_mapbase(testval);
481 	ln2size = 0;
482 	if (testval != 0) {
483 		while ((testval & 1) == 0)
484 		{
485 			ln2size++;
486 			testval >>= 1;
487 		}
488 	}
489 	return (ln2size);
490 }
491 
/* return base address of device ROM */

static pci_addr_t
pci_rombase(uint64_t mapreg)
{

	/* Mask off the enable bit and reserved low bits of the ROM BAR. */
	return (mapreg & PCIM_BIOS_ADDR_MASK);
}
500 
501 /* return log2 of map size decided for device ROM */
502 
503 static int
504 pci_romsize(uint64_t testval)
505 {
506 	int ln2size;
507 
508 	testval = pci_rombase(testval);
509 	ln2size = 0;
510 	if (testval != 0) {
511 		while ((testval & 1) == 0)
512 		{
513 			ln2size++;
514 			testval >>= 1;
515 		}
516 	}
517 	return (ln2size);
518 }
519 
520 /* return log2 of address range supported by map register */
521 
522 static int
523 pci_maprange(uint64_t mapreg)
524 {
525 	int ln2range = 0;
526 
527 	if (PCI_BAR_IO(mapreg))
528 		ln2range = 32;
529 	else
530 		switch (mapreg & PCIM_BAR_MEM_TYPE) {
531 		case PCIM_BAR_MEM_32:
532 			ln2range = 32;
533 			break;
534 		case PCIM_BAR_MEM_1MB:
535 			ln2range = 20;
536 			break;
537 		case PCIM_BAR_MEM_64:
538 			ln2range = 64;
539 			break;
540 		}
541 	return (ln2range);
542 }
543 
544 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
545 
546 static void
547 pci_fixancient(pcicfgregs *cfg)
548 {
549 	if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
550 		return;
551 
552 	/* PCI to PCI bridges use header type 1 */
553 	if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
554 		cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
555 }
556 
/* extract header type specific config data */

static void
pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	/* Subsystem ID offsets and BAR counts differ per header type. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		cfg->subvendor      = REG(PCIR_SUBVEND_0, 2);
		cfg->subdevice      = REG(PCIR_SUBDEV_0, 2);
		cfg->nummaps	    = PCI_MAXMAPS_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		/* Bridges carry no subsystem IDs in the standard header. */
		cfg->nummaps	    = PCI_MAXMAPS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		cfg->subvendor      = REG(PCIR_SUBVEND_2, 2);
		cfg->subdevice      = REG(PCIR_SUBDEV_2, 2);
		cfg->nummaps	    = PCI_MAXMAPS_2;
		break;
	}
#undef REG
}
580 
/* read configuration header into pcicfgregs structure */
struct pci_devinfo *
pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	pcicfgregs *cfg = NULL;
	struct pci_devinfo *devlist_entry;
	struct devlist *devlist_head;

	devlist_head = &pci_devq;

	devlist_entry = NULL;

	/* An all-ones vendor/device word means no function is present. */
	if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
		/* 'size' lets callers embed pci_devinfo in a larger struct. */
		devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
		/* NOTE(review): M_WAITOK malloc should not fail; check kept. */
		if (devlist_entry == NULL)
			return (NULL);

		cfg = &devlist_entry->cfg;

		/* Capture the header fields common to all header types. */
		cfg->domain		= d;
		cfg->bus		= b;
		cfg->slot		= s;
		cfg->func		= f;
		cfg->vendor		= REG(PCIR_VENDOR, 2);
		cfg->device		= REG(PCIR_DEVICE, 2);
		cfg->cmdreg		= REG(PCIR_COMMAND, 2);
		cfg->statreg		= REG(PCIR_STATUS, 2);
		cfg->baseclass		= REG(PCIR_CLASS, 1);
		cfg->subclass		= REG(PCIR_SUBCLASS, 1);
		cfg->progif		= REG(PCIR_PROGIF, 1);
		cfg->revid		= REG(PCIR_REVID, 1);
		cfg->hdrtype		= REG(PCIR_HDRTYPE, 1);
		cfg->cachelnsz		= REG(PCIR_CACHELNSZ, 1);
		cfg->lattimer		= REG(PCIR_LATTIMER, 1);
		cfg->intpin		= REG(PCIR_INTPIN, 1);
		cfg->intline		= REG(PCIR_INTLINE, 1);

		cfg->mingnt		= REG(PCIR_MINGNT, 1);
		cfg->maxlat		= REG(PCIR_MAXLAT, 1);

		/* Split the multi-function flag out of the header type. */
		cfg->mfdev		= (cfg->hdrtype & PCIM_MFDEV) != 0;
		cfg->hdrtype		&= ~PCIM_MFDEV;
		STAILQ_INIT(&cfg->maps);

		pci_fixancient(cfg);
		pci_hdrtypedata(pcib, b, s, f, cfg);

		/* Parse the capability list only when one is advertised. */
		if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
			pci_read_cap(pcib, cfg);

		STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);

		/* Mirror selected fields into the pciio(4) conf record. */
		devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
		devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
		devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
		devlist_entry->conf.pc_sel.pc_func = cfg->func;
		devlist_entry->conf.pc_hdr = cfg->hdrtype;

		devlist_entry->conf.pc_subvendor = cfg->subvendor;
		devlist_entry->conf.pc_subdevice = cfg->subdevice;
		devlist_entry->conf.pc_vendor = cfg->vendor;
		devlist_entry->conf.pc_device = cfg->device;

		devlist_entry->conf.pc_class = cfg->baseclass;
		devlist_entry->conf.pc_subclass = cfg->subclass;
		devlist_entry->conf.pc_progif = cfg->progif;
		devlist_entry->conf.pc_revid = cfg->revid;

		pci_numdevs++;
		pci_generation++;
	}
	return (devlist_entry);
#undef REG
}
656 
/*
 * Walk the function's PCI capability list and record the location and
 * salient contents of each capability of interest (power management,
 * HyperTransport, MSI, MSI-X, VPD, subvendor, PCI-X, PCI-express)
 * into 'cfg'.
 */
static void
pci_read_cap(device_t pcib, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define	WREG(n, v, w)	PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
	uint64_t addr;
#endif
	uint32_t val;
	int	ptr, nextptr, ptrptr;

	/* The capability pointer's offset depends on the header type. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
	case PCIM_HDRTYPE_BRIDGE:
		ptrptr = PCIR_CAP_PTR;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
		break;
	default:
		return;		/* no extended capabilities support */
	}
	nextptr = REG(ptrptr, 1);	/* sanity check? */

	/*
	 * Read capability entries.
	 */
	while (nextptr != 0) {
		/* Sanity check */
		if (nextptr > 255) {
			printf("illegal PCI extended capability offset %d\n",
			    nextptr);
			return;
		}
		/* Find the next entry */
		ptr = nextptr;
		nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

		/* Process this entry */
		switch (REG(ptr + PCICAP_ID, 1)) {
		case PCIY_PMG:		/* PCI power management */
			/* Only the first PM capability found is recorded. */
			if (cfg->pp.pp_cap == 0) {
				cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
				cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
				cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
				if ((nextptr - ptr) > PCIR_POWER_DATA)
					cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
			}
			break;
		case PCIY_HT:		/* HyperTransport */
			/* Determine HT-specific capability type. */
			val = REG(ptr + PCIR_HT_COMMAND, 2);

			if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
				cfg->ht.ht_slave = ptr;

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
			switch (val & PCIM_HTCMD_CAP_MASK) {
			case PCIM_HTCAP_MSI_MAPPING:
				if (!(val & PCIM_HTCMD_MSI_FIXED)) {
					/* Sanity check the mapping window. */
					addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
					    4);
					addr <<= 32;
					addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
					    4);
					if (addr != MSI_INTEL_ADDR_BASE)
						device_printf(pcib,
	    "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    (long long)addr);
				} else
					addr = MSI_INTEL_ADDR_BASE;

				cfg->ht.ht_msimap = ptr;
				cfg->ht.ht_msictrl = val;
				cfg->ht.ht_msiaddr = addr;
				break;
			}
#endif
			break;
		case PCIY_MSI:		/* PCI MSI */
			cfg->msi.msi_location = ptr;
			cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
			/* The MMC field encodes the message count as log2. */
			cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
						     PCIM_MSICTRL_MMC_MASK)>>1);
			break;
		case PCIY_MSIX:		/* PCI MSI-X */
			cfg->msix.msix_location = ptr;
			cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
			cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
			    PCIM_MSIXCTRL_TABLE_SIZE) + 1;
			/* Table and PBA locations are encoded as BIR + offset. */
			val = REG(ptr + PCIR_MSIX_TABLE, 4);
			cfg->msix.msix_table_bar = PCIR_BAR(val &
			    PCIM_MSIX_BIR_MASK);
			cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
			val = REG(ptr + PCIR_MSIX_PBA, 4);
			cfg->msix.msix_pba_bar = PCIR_BAR(val &
			    PCIM_MSIX_BIR_MASK);
			cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
			break;
		case PCIY_VPD:		/* PCI Vital Product Data */
			cfg->vpd.vpd_reg = ptr;
			break;
		case PCIY_SUBVENDOR:
			/* Should always be true. */
			if ((cfg->hdrtype & PCIM_HDRTYPE) ==
			    PCIM_HDRTYPE_BRIDGE) {
				val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
				cfg->subvendor = val & 0xffff;
				cfg->subdevice = val >> 16;
			}
			break;
		case PCIY_PCIX:		/* PCI-X */
			/*
			 * Assume we have a PCI-X chipset if we have
			 * at least one PCI-PCI bridge with a PCI-X
			 * capability.  Note that some systems with
			 * PCI-express or HT chipsets might match on
			 * this check as well.
			 */
			if ((cfg->hdrtype & PCIM_HDRTYPE) ==
			    PCIM_HDRTYPE_BRIDGE)
				pcix_chipset = 1;
			cfg->pcix.pcix_location = ptr;
			break;
		case PCIY_EXPRESS:	/* PCI-express */
			/*
			 * Assume we have a PCI-express chipset if we have
			 * at least one PCI-express device.
			 */
			pcie_chipset = 1;
			cfg->pcie.pcie_location = ptr;
			val = REG(ptr + PCIER_FLAGS, 2);
			cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
			break;
		default:
			break;
		}
	}

#if defined(__powerpc__)
	/*
	 * Enable the MSI mapping window for all HyperTransport
	 * slaves.  PCI-PCI bridges have their windows enabled via
	 * PCIB_MAP_MSI().
	 */
	if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
	    !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
		device_printf(pcib,
	    "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
		     2);
	}
#endif
/* REG and WREG use carry through to next functions */
}
817 
818 /*
819  * PCI Vital Product Data
820  */
821 
822 #define	PCI_VPD_TIMEOUT		1000000
823 
824 static int
825 pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
826 {
827 	int count = PCI_VPD_TIMEOUT;
828 
829 	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));
830 
831 	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
832 
833 	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
834 		if (--count < 0)
835 			return (ENXIO);
836 		DELAY(1);	/* limit looping */
837 	}
838 	*data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
839 
840 	return (0);
841 }
842 
#if 0
/*
 * Write one aligned 32-bit word of VPD data; the mirror image of
 * pci_read_vpd_reg().  Compiled out because nothing currently writes
 * VPD; kept for reference.
 */
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned"));

	/* Writing the address with bit 15 set starts a write cycle. */
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	/* The device clears bit 15 when the write has completed. */
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif
862 
863 #undef PCI_VPD_TIMEOUT
864 
/*
 * Cursor state for reading VPD one byte at a time on top of the
 * 32-bit pci_read_vpd_reg() interface.
 */
struct vpd_readstate {
	device_t	pcib;		/* bridge used for config accesses */
	pcicfgregs	*cfg;		/* device whose VPD is being read */
	uint32_t	val;		/* current 32-bit word being drained */
	int		bytesinval;	/* bytes of 'val' not yet consumed */
	int		off;		/* next VPD address to fetch */
	uint8_t		cksum;		/* running sum of all bytes read */
};
873 
874 static int
875 vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
876 {
877 	uint32_t reg;
878 	uint8_t byte;
879 
880 	if (vrs->bytesinval == 0) {
881 		if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
882 			return (ENXIO);
883 		vrs->val = le32toh(reg);
884 		vrs->off += 4;
885 		byte = vrs->val & 0xff;
886 		vrs->bytesinval = 3;
887 	} else {
888 		vrs->val = vrs->val >> 8;
889 		byte = vrs->val & 0xff;
890 		vrs->bytesinval--;
891 	}
892 
893 	vrs->cksum += byte;
894 	*data = byte;
895 	return (0);
896 }
897 
/*
 * Read and cache a device's Vital Product Data (VPD) via the parent
 * bridge 'pcib'.  Populates cfg->vpd with the identifier string, the
 * read-only (VPD-R) keyword/value list, and the read/write (VPD-W)
 * keyword descriptors.  Implemented as a byte-at-a-time state machine
 * over vpd_nextbyte(); state -1 means normal termination, state -2
 * means a config-space read error occurred.  On checksum failure the
 * read-only data is discarded; on read error everything is discarded.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;	/* -1 = not yet seen the RV checksum keyword */
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* Large resource: 16-bit LE length follows. */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				/* VPD register can address at most 0x7f * 4 bytes. */
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					pci_printf(cfg,
					    "invalid VPD data, remain %#x\n",
					    remain);
				}
				name = byte & 0x7f;
			} else {
				/* Small resource: 3-bit length, 4-bit name. */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = malloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			/* Grow the read-only array by doubling when full. */
			if (off == alloc) {
				cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].len = dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				pci_printf(cfg, "bad keyword length: %d\n",
				    dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				cfg->vpd.vpd_ros[off].value = malloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = malloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* account for keyword + length bytes */
			i = 0;
			/* keep in sync w/ state 3's transistions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			/* The RV keyword carries the section checksum byte. */
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						pci_printf(cfg,
					    "bad VPD cksum, remain %hhu\n",
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transistions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* Trim the array down to the entries used. */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:
			/* Skip remaining bytes of an uninteresting item. */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			/* Grow the read/write array by doubling when full. */
			if (off == alloc) {
				cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			/* Remember where this field starts for later writes. */
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* account for keyword + length bytes */
			i = 0;
			/* keep in sync w/ state 6's transistions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transistions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* Trim the array down to the entries used. */
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			pci_printf(cfg, "invalid state: %d\n", state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		pci_printf(cfg, "failed to read VPD data.\n");
		if (cfg->vpd.vpd_ident != NULL) {
			free(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	/* Mark the cache valid even on failure so we don't retry forever. */
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
1169 
1170 int
1171 pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1172 {
1173 	struct pci_devinfo *dinfo = device_get_ivars(child);
1174 	pcicfgregs *cfg = &dinfo->cfg;
1175 
1176 	if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1177 		pci_read_vpd(device_get_parent(dev), cfg);
1178 
1179 	*identptr = cfg->vpd.vpd_ident;
1180 
1181 	if (*identptr == NULL)
1182 		return (ENXIO);
1183 
1184 	return (0);
1185 }
1186 
1187 int
1188 pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1189 	const char **vptr)
1190 {
1191 	struct pci_devinfo *dinfo = device_get_ivars(child);
1192 	pcicfgregs *cfg = &dinfo->cfg;
1193 	int i;
1194 
1195 	if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1196 		pci_read_vpd(device_get_parent(dev), cfg);
1197 
1198 	for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1199 		if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1200 		    sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1201 			*vptr = cfg->vpd.vpd_ros[i].value;
1202 			return (0);
1203 		}
1204 
1205 	*vptr = NULL;
1206 	return (ENXIO);
1207 }
1208 
1209 struct pcicfg_vpd *
1210 pci_fetch_vpd_list(device_t dev)
1211 {
1212 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1213 	pcicfgregs *cfg = &dinfo->cfg;
1214 
1215 	if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1216 		pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1217 	return (&cfg->vpd);
1218 }
1219 
1220 /*
1221  * Find the requested HyperTransport capability and return the offset
1222  * in configuration space via the pointer provided.  The function
1223  * returns 0 on success and an error code otherwise.
1224  */
int
pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
{
	int ptr, error;
	uint16_t val;

	/* Locate the first HyperTransport capability, if any. */
	error = pci_find_cap(child, PCIY_HT, &ptr);
	if (error)
		return (error);

	/*
	 * Traverse the capabilities list checking each HT capability
	 * to see if it matches the requested HT capability.
	 */
	while (ptr != 0) {
		val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
		/*
		 * Slave and host capabilities are distinguished by the
		 * top three bits of the command register only; other HT
		 * capabilities use the full capability-type field.
		 */
		if (capability == PCIM_HTCAP_SLAVE ||
		    capability == PCIM_HTCAP_HOST)
			val &= 0xe000;
		else
			val &= PCIM_HTCMD_CAP_MASK;
		if (val == capability) {
			if (capreg != NULL)
				*capreg = ptr;
			return (0);
		}

		/* Skip to the next HT capability. */
		while (ptr != 0) {
			ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
			if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
			    PCIY_HT)
				break;
		}
	}
	return (ENOENT);
}
1262 
1263 /*
1264  * Find the requested capability and return the offset in
1265  * configuration space via the pointer provided.  The function returns
1266  * 0 on success and an error code otherwise.
1267  */
1268 int
1269 pci_find_cap_method(device_t dev, device_t child, int capability,
1270     int *capreg)
1271 {
1272 	struct pci_devinfo *dinfo = device_get_ivars(child);
1273 	pcicfgregs *cfg = &dinfo->cfg;
1274 	u_int32_t status;
1275 	u_int8_t ptr;
1276 
1277 	/*
1278 	 * Check the CAP_LIST bit of the PCI status register first.
1279 	 */
1280 	status = pci_read_config(child, PCIR_STATUS, 2);
1281 	if (!(status & PCIM_STATUS_CAPPRESENT))
1282 		return (ENXIO);
1283 
1284 	/*
1285 	 * Determine the start pointer of the capabilities list.
1286 	 */
1287 	switch (cfg->hdrtype & PCIM_HDRTYPE) {
1288 	case PCIM_HDRTYPE_NORMAL:
1289 	case PCIM_HDRTYPE_BRIDGE:
1290 		ptr = PCIR_CAP_PTR;
1291 		break;
1292 	case PCIM_HDRTYPE_CARDBUS:
1293 		ptr = PCIR_CAP_PTR_2;
1294 		break;
1295 	default:
1296 		/* XXX: panic? */
1297 		return (ENXIO);		/* no extended capabilities support */
1298 	}
1299 	ptr = pci_read_config(child, ptr, 1);
1300 
1301 	/*
1302 	 * Traverse the capabilities list.
1303 	 */
1304 	while (ptr != 0) {
1305 		if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1306 			if (capreg != NULL)
1307 				*capreg = ptr;
1308 			return (0);
1309 		}
1310 		ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1311 	}
1312 
1313 	return (ENOENT);
1314 }
1315 
1316 /*
1317  * Find the requested extended capability and return the offset in
1318  * configuration space via the pointer provided.  The function returns
1319  * 0 on success and an error code otherwise.
1320  */
1321 int
1322 pci_find_extcap_method(device_t dev, device_t child, int capability,
1323     int *capreg)
1324 {
1325 	struct pci_devinfo *dinfo = device_get_ivars(child);
1326 	pcicfgregs *cfg = &dinfo->cfg;
1327 	uint32_t ecap;
1328 	uint16_t ptr;
1329 
1330 	/* Only supported for PCI-express devices. */
1331 	if (cfg->pcie.pcie_location == 0)
1332 		return (ENXIO);
1333 
1334 	ptr = PCIR_EXTCAP;
1335 	ecap = pci_read_config(child, ptr, 4);
1336 	if (ecap == 0xffffffff || ecap == 0)
1337 		return (ENOENT);
1338 	for (;;) {
1339 		if (PCI_EXTCAP_ID(ecap) == capability) {
1340 			if (capreg != NULL)
1341 				*capreg = ptr;
1342 			return (0);
1343 		}
1344 		ptr = PCI_EXTCAP_NEXTPTR(ecap);
1345 		if (ptr == 0)
1346 			break;
1347 		ecap = pci_read_config(child, ptr, 4);
1348 	}
1349 
1350 	return (ENOENT);
1351 }
1352 
1353 /*
1354  * Support for MSI-X message interrupts.
1355  */
1356 void
1357 pci_enable_msix_method(device_t dev, device_t child, u_int index,
1358     uint64_t address, uint32_t data)
1359 {
1360 	struct pci_devinfo *dinfo = device_get_ivars(child);
1361 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1362 	uint32_t offset;
1363 
1364 	KASSERT(msix->msix_table_len > index, ("bogus index"));
1365 	offset = msix->msix_table_offset + index * 16;
1366 	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1367 	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1368 	bus_write_4(msix->msix_table_res, offset + 8, data);
1369 
1370 	/* Enable MSI -> HT mapping. */
1371 	pci_ht_map_msi(child, address);
1372 }
1373 
1374 void
1375 pci_mask_msix(device_t dev, u_int index)
1376 {
1377 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1378 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1379 	uint32_t offset, val;
1380 
1381 	KASSERT(msix->msix_msgnum > index, ("bogus index"));
1382 	offset = msix->msix_table_offset + index * 16 + 12;
1383 	val = bus_read_4(msix->msix_table_res, offset);
1384 	if (!(val & PCIM_MSIX_VCTRL_MASK)) {
1385 		val |= PCIM_MSIX_VCTRL_MASK;
1386 		bus_write_4(msix->msix_table_res, offset, val);
1387 	}
1388 }
1389 
1390 void
1391 pci_unmask_msix(device_t dev, u_int index)
1392 {
1393 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1394 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1395 	uint32_t offset, val;
1396 
1397 	KASSERT(msix->msix_table_len > index, ("bogus index"));
1398 	offset = msix->msix_table_offset + index * 16 + 12;
1399 	val = bus_read_4(msix->msix_table_res, offset);
1400 	if (val & PCIM_MSIX_VCTRL_MASK) {
1401 		val &= ~PCIM_MSIX_VCTRL_MASK;
1402 		bus_write_4(msix->msix_table_res, offset, val);
1403 	}
1404 }
1405 
1406 int
1407 pci_pending_msix(device_t dev, u_int index)
1408 {
1409 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1410 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1411 	uint32_t offset, bit;
1412 
1413 	KASSERT(msix->msix_table_len > index, ("bogus index"));
1414 	offset = msix->msix_pba_offset + (index / 32) * 4;
1415 	bit = 1 << index % 32;
1416 	return (bus_read_4(msix->msix_pba_res, offset) & bit);
1417 }
1418 
1419 /*
1420  * Restore MSI-X registers and table during resume.  If MSI-X is
1421  * enabled then walk the virtual table to restore the actual MSI-X
1422  * table.
1423  */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/* First, mask all vectors. */
		for (i = 0; i < msix->msix_msgnum; i++)
			pci_mask_msix(dev, i);

		/* Second, program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
				continue;
			/* mte_vector is 1-based; msix_vectors[] is 0-based. */
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
			pci_unmask_msix(dev, i);
		}
	}
	/* Finally, restore the saved MSI-X control register value. */
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}
1451 
1452 /*
1453  * Attempt to allocate *count MSI-X messages.  The actual number allocated is
1454  * returned in *count.  After this function returns, each message will be
1455  * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1456  */
int
pci_alloc_msix_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI-X is blacklisted for this system, fail. */
	if (pci_msix_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
		return (ENODEV);

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	cfg->msix.msix_table_res = rle->res;
	/* The PBA may live in the same BAR as the table or a separate one. */
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	cfg->msix.msix_pba_res = rle->res;

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
		    *count, cfg->msix.msix_msgnum);
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		if (error) {
			/* Fail outright only if no message was obtained. */
			if (i == 0)
				return (error);
			break;
		}
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}
	/* 'actual' is the number of messages successfully allocated. */
	actual = i;

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
		if (actual == 1)
			device_printf(child, "using IRQ %lu for MSI-X\n",
			    rle->start);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs of
			 * IRQ values as ranges.  'irq' is the previous IRQ.
			 * 'run' is true if we are in a range.
			 */
			device_printf(child, "using IRQs %lu", rle->start);
			irq = rle->start;
			run = 0;
			for (i = 1; i < actual; i++) {
				rle = resource_list_find(&dinfo->resources,
				    SYS_RES_IRQ, i + 1);

				/* Still in a run? */
				if (rle->start == irq + 1) {
					run = 1;
					irq++;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irq);
					run = 0;
				}

				/* Start new range. */
				printf(",%lu", rle->start);
				irq = rle->start;
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irq);
			printf(" for MSI-X\n");
		}
	}

	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix(child, i);

	/* Allocate and initialize vector data and virtual table. */
	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		/* mte_vector entries are 1-based (0 means "unassigned"). */
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}
1591 
1592 /*
1593  * By default, pci_alloc_msix() will assign the allocated IRQ
1594  * resources consecutively to the first N messages in the MSI-X table.
1595  * However, device drivers may want to use different layouts if they
1596  * either receive fewer messages than they asked for, or they wish to
1597  * populate the MSI-X table sparsely.  This method allows the driver
1598  * to specify what layout it wants.  It must be called after a
1599  * successful pci_alloc_msix() but before any of the associated
1600  * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1601  *
1602  * The 'vectors' array contains 'count' message vectors.  The array
1603  * maps directly to the MSI-X table in that index 0 in the array
1604  * specifies the vector for the first message in the MSI-X table, etc.
1605  * The vector value in each array index can either be 0 to indicate
1606  * that no vector should be assigned to a message slot, or it can be a
1607  * number from 1 to N (where N is the count returned from a
1608  * succcessful call to pci_alloc_msix()) to indicate which message
1609  * vector (IRQ) to be used for the corresponding message.
1610  *
1611  * On successful return, each message with a non-zero vector will have
1612  * an associated SYS_RES_IRQ whose rid is equal to the array index +
1613  * 1.  Additionally, if any of the IRQs allocated via the previous
1614  * call to pci_alloc_msix() are not used in the mapping, those IRQs
1615  * will be freed back to the system automatically.
1616  *
1617  * For example, suppose a driver has a MSI-X table with 6 messages and
1618  * asks for 6 messages, but pci_alloc_msix() only returns a count of
1619  * 3.  Call the three vectors allocated by pci_alloc_msix() A, B, and
1620  * C.  After the call to pci_alloc_msix(), the device will be setup to
1621  * have an MSI-X table of ABC--- (where - means no vector assigned).
1622  * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1623  * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1624  * be freed back to the system.  This device will also have valid
1625  * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1626  *
1627  * In any case, the SYS_RES_IRQ rid X will always map to the message
1628  * at MSI-X table index X - 1 and will only be valid if a vector is
1629  * assigned to that table entry.
1630  */
1631 int
1632 pci_remap_msix_method(device_t dev, device_t child, int count,
1633     const u_int *vectors)
1634 {
1635 	struct pci_devinfo *dinfo = device_get_ivars(child);
1636 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1637 	struct resource_list_entry *rle;
1638 	int i, irq, j, *used;
1639 
1640 	/*
1641 	 * Have to have at least one message in the table but the
1642 	 * table can't be bigger than the actual MSI-X table in the
1643 	 * device.
1644 	 */
1645 	if (count == 0 || count > msix->msix_msgnum)
1646 		return (EINVAL);
1647 
1648 	/* Sanity check the vectors. */
1649 	for (i = 0; i < count; i++)
1650 		if (vectors[i] > msix->msix_alloc)
1651 			return (EINVAL);
1652 
1653 	/*
1654 	 * Make sure there aren't any holes in the vectors to be used.
1655 	 * It's a big pain to support it, and it doesn't really make
1656 	 * sense anyway.  Also, at least one vector must be used.
1657 	 */
1658 	used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
1659 	    M_ZERO);
1660 	for (i = 0; i < count; i++)
1661 		if (vectors[i] != 0)
1662 			used[vectors[i] - 1] = 1;
1663 	for (i = 0; i < msix->msix_alloc - 1; i++)
1664 		if (used[i] == 0 && used[i + 1] == 1) {
1665 			free(used, M_DEVBUF);
1666 			return (EINVAL);
1667 		}
1668 	if (used[0] != 1) {
1669 		free(used, M_DEVBUF);
1670 		return (EINVAL);
1671 	}
1672 
1673 	/* Make sure none of the resources are allocated. */
1674 	for (i = 0; i < msix->msix_table_len; i++) {
1675 		if (msix->msix_table[i].mte_vector == 0)
1676 			continue;
1677 		if (msix->msix_table[i].mte_handlers > 0)
1678 			return (EBUSY);
1679 		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1680 		KASSERT(rle != NULL, ("missing resource"));
1681 		if (rle->res != NULL)
1682 			return (EBUSY);
1683 	}
1684 
1685 	/* Free the existing resource list entries. */
1686 	for (i = 0; i < msix->msix_table_len; i++) {
1687 		if (msix->msix_table[i].mte_vector == 0)
1688 			continue;
1689 		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
1690 	}
1691 
1692 	/*
1693 	 * Build the new virtual table keeping track of which vectors are
1694 	 * used.
1695 	 */
1696 	free(msix->msix_table, M_DEVBUF);
1697 	msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
1698 	    M_DEVBUF, M_WAITOK | M_ZERO);
1699 	for (i = 0; i < count; i++)
1700 		msix->msix_table[i].mte_vector = vectors[i];
1701 	msix->msix_table_len = count;
1702 
1703 	/* Free any unused IRQs and resize the vectors array if necessary. */
1704 	j = msix->msix_alloc - 1;
1705 	if (used[j] == 0) {
1706 		struct msix_vector *vec;
1707 
1708 		while (used[j] == 0) {
1709 			PCIB_RELEASE_MSIX(device_get_parent(dev), child,
1710 			    msix->msix_vectors[j].mv_irq);
1711 			j--;
1712 		}
1713 		vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
1714 		    M_WAITOK);
1715 		bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
1716 		    (j + 1));
1717 		free(msix->msix_vectors, M_DEVBUF);
1718 		msix->msix_vectors = vec;
1719 		msix->msix_alloc = j + 1;
1720 	}
1721 	free(used, M_DEVBUF);
1722 
1723 	/* Map the IRQs onto the rids. */
1724 	for (i = 0; i < count; i++) {
1725 		if (vectors[i] == 0)
1726 			continue;
1727 		irq = msix->msix_vectors[vectors[i]].mv_irq;
1728 		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1729 		    irq, 1);
1730 	}
1731 
1732 	if (bootverbose) {
1733 		device_printf(child, "Remapped MSI-X IRQs as: ");
1734 		for (i = 0; i < count; i++) {
1735 			if (i != 0)
1736 				printf(", ");
1737 			if (vectors[i] == 0)
1738 				printf("---");
1739 			else
1740 				printf("%d",
1741 				    msix->msix_vectors[vectors[i]].mv_irq);
1742 		}
1743 		printf("\n");
1744 	}
1745 
1746 	return (0);
1747 }
1748 
/*
 * Release all MSI-X state for 'child': verifies no message is still in
 * use, disables MSI-X in the control register, deletes the SYS_RES_IRQ
 * resource list entries, and returns the IRQs to the parent bridge.
 * Returns ENODEV if nothing is allocated and EBUSY if any message
 * still has a handler or an allocated resource.
 */
static int
pci_release_msix(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i;

	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
		return (ENODEV);

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0)
			return (EBUSY);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
			return (EBUSY);
	}

	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);

	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;

	/* Release the IRQs. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq);
	free(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
	return (0);
}
1795 
1796 /*
1797  * Return the max supported MSI-X messages this device supports.
1798  * Basically, assuming the MD code can alloc messages, this function
1799  * should return the maximum value that pci_alloc_msix() can return.
1800  * Thus, it is subject to the tunables, etc.
1801  */
1802 int
1803 pci_msix_count_method(device_t dev, device_t child)
1804 {
1805 	struct pci_devinfo *dinfo = device_get_ivars(child);
1806 	struct pcicfg_msix *msix = &dinfo->cfg.msix;
1807 
1808 	if (pci_do_msix && msix->msix_location != 0)
1809 		return (msix->msix_msgnum);
1810 	return (0);
1811 }
1812 
1813 /*
1814  * HyperTransport MSI mapping control
1815  */
1816 void
1817 pci_ht_map_msi(device_t dev, uint64_t addr)
1818 {
1819 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1820 	struct pcicfg_ht *ht = &dinfo->cfg.ht;
1821 
1822 	if (!ht->ht_msimap)
1823 		return;
1824 
1825 	if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
1826 	    ht->ht_msiaddr >> 20 == addr >> 20) {
1827 		/* Enable MSI -> HT mapping. */
1828 		ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1829 		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1830 		    ht->ht_msictrl, 2);
1831 	}
1832 
1833 	if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
1834 		/* Disable MSI -> HT mapping. */
1835 		ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
1836 		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
1837 		    ht->ht_msictrl, 2);
1838 	}
1839 }
1840 
1841 int
1842 pci_get_max_read_req(device_t dev)
1843 {
1844 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1845 	int cap;
1846 	uint16_t val;
1847 
1848 	cap = dinfo->cfg.pcie.pcie_location;
1849 	if (cap == 0)
1850 		return (0);
1851 	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1852 	val &= PCIEM_CTL_MAX_READ_REQUEST;
1853 	val >>= 12;
1854 	return (1 << (val + 7));
1855 }
1856 
1857 int
1858 pci_set_max_read_req(device_t dev, int size)
1859 {
1860 	struct pci_devinfo *dinfo = device_get_ivars(dev);
1861 	int cap;
1862 	uint16_t val;
1863 
1864 	cap = dinfo->cfg.pcie.pcie_location;
1865 	if (cap == 0)
1866 		return (0);
1867 	if (size < 128)
1868 		size = 128;
1869 	if (size > 4096)
1870 		size = 4096;
1871 	size = (1 << (fls(size) - 1));
1872 	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
1873 	val &= ~PCIEM_CTL_MAX_READ_REQUEST;
1874 	val |= (fls(size) - 8) << 12;
1875 	pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
1876 	return (size);
1877 }
1878 
1879 /*
1880  * Support for MSI message signalled interrupts.
1881  */
/*
 * Program the MSI address/data registers of 'child' and set the MSI
 * enable bit.  The address and data registers must be written before
 * the enable bit so the device never signals with stale values.
 */
void
pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
    uint16_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Write data and address values. */
	pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	/* The data register's offset depends on 64-bit address support. */
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		    address >> 32, 4);
		pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
		    data, 2);
	} else
		pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
		    2);

	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(child, address);
}
1909 
1910 void
1911 pci_disable_msi_method(device_t dev, device_t child)
1912 {
1913 	struct pci_devinfo *dinfo = device_get_ivars(child);
1914 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
1915 
1916 	/* Disable MSI -> HT mapping. */
1917 	pci_ht_map_msi(child, 0);
1918 
1919 	/* Disable MSI in the control register. */
1920 	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
1921 	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
1922 	    msi->msi_ctrl, 2);
1923 }
1924 
1925 /*
1926  * Restore MSI registers during resume.  If MSI is enabled then
1927  * restore the data and address registers in addition to the control
1928  * register.
1929  */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	/* Rewrite address/data only when MSI was enabled before suspend. */
	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		/* The data register's offset depends on 64-bit support. */
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	/* Always restore the saved control register value. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1955 
/*
 * Re-route an already-allocated MSI or MSI-X IRQ after the platform
 * has moved it (e.g. interrupt migration).  Looks the IRQ up in the
 * device's MSI or MSI-X state, requests fresh address/data from the
 * parent bridge, and reprograms the hardware.  Returns ENOENT when
 * the IRQ does not belong to this device.
 */
static int
pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	int error, i, j;

	/*
	 * Handle MSI first.  We try to find this IRQ among our list
	 * of MSI IRQs.  If we find it, we request updated address and
	 * data registers and apply the results.
	 */
	if (cfg->msi.msi_alloc > 0) {

		/* If we don't have any active handlers, nothing to do. */
		if (cfg->msi.msi_handlers == 0)
			return (0);
		for (i = 0; i < cfg->msi.msi_alloc; i++) {
			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
			    i + 1);
			if (rle->start == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				/* Disable while updating to avoid a stale shot. */
				pci_disable_msi(dev);
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(dev, addr, data);
				return (0);
			}
		}
		return (ENOENT);
	}

	/*
	 * For MSI-X, we check to see if we have this IRQ.  If we do,
	 * we request the updated mapping info.  If that works, we go
	 * through all the slots that use this IRQ and update them.
	 */
	if (cfg->msix.msix_alloc > 0) {
		for (i = 0; i < cfg->msix.msix_alloc; i++) {
			mv = &cfg->msix.msix_vectors[i];
			if (mv->mv_irq == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				mv->mv_address = addr;
				mv->mv_data = data;
				/* One vector may feed multiple table slots. */
				for (j = 0; j < cfg->msix.msix_table_len; j++) {
					mte = &cfg->msix.msix_table[j];
					if (mte->mte_vector != i + 1)
						continue;
					if (mte->mte_handlers == 0)
						continue;
					/* Mask while rewriting the entry. */
					pci_mask_msix(dev, j);
					pci_enable_msix(dev, j, addr, data);
					pci_unmask_msix(dev, j);
				}
			}
		}
		return (ENOENT);
	}

	return (ENOENT);
}
2028 
2029 /*
2030  * Returns true if the specified device is blacklisted because MSI
2031  * doesn't work.
2032  */
2033 int
2034 pci_msi_device_blacklisted(device_t dev)
2035 {
2036 
2037 	if (!pci_honor_msi_blacklist)
2038 		return (0);
2039 
2040 	return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2041 }
2042 
2043 /*
2044  * Determine if MSI is blacklisted globally on this system.  Currently,
2045  * we just check for blacklisted chipsets as represented by the
2046  * host-PCI bridge at device 0:0:0.  In the future, it may become
2047  * necessary to check other system attributes, such as the kenv values
2048  * that give the motherboard manufacturer and model number.
2049  */
static int
pci_msi_blacklisted(void)
{
	device_t dev;

	/* Honor the global tunable: blacklist disabled means MSI is OK. */
	if (!pci_honor_msi_blacklist)
		return (0);

	/* Blacklist all non-PCI-express and non-PCI-X chipsets. */
	if (!(pcie_chipset || pcix_chipset)) {
		if (vm_guest != VM_GUEST_NO) {
			/*
			 * Whitelist older chipsets in virtual
			 * machines known to support MSI.
			 */
			dev = pci_find_bsf(0, 0, 0);
			if (dev != NULL)
				return (!pci_has_quirk(pci_get_devid(dev),
					PCI_QUIRK_ENABLE_MSI_VM));
		}
		/* Bare-metal legacy chipset: assume MSI is broken. */
		return (1);
	}

	/* Otherwise defer to the per-device quirk on the host bridge. */
	dev = pci_find_bsf(0, 0, 0);
	if (dev != NULL)
		return (pci_msi_device_blacklisted(dev));
	return (0);
}
2078 
2079 /*
2080  * Returns true if the specified device is blacklisted because MSI-X
2081  * doesn't work.  Note that this assumes that if MSI doesn't work,
2082  * MSI-X doesn't either.
2083  */
2084 int
2085 pci_msix_device_blacklisted(device_t dev)
2086 {
2087 
2088 	if (!pci_honor_msi_blacklist)
2089 		return (0);
2090 
2091 	if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2092 		return (1);
2093 
2094 	return (pci_msi_device_blacklisted(dev));
2095 }
2096 
2097 /*
2098  * Determine if MSI-X is blacklisted globally on this system.  If MSI
2099  * is blacklisted, assume that MSI-X is as well.  Check for additional
2100  * chipsets where MSI works but MSI-X does not.
2101  */
2102 static int
2103 pci_msix_blacklisted(void)
2104 {
2105 	device_t dev;
2106 
2107 	if (!pci_honor_msi_blacklist)
2108 		return (0);
2109 
2110 	dev = pci_find_bsf(0, 0, 0);
2111 	if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2112 	    PCI_QUIRK_DISABLE_MSIX))
2113 		return (1);
2114 
2115 	return (pci_msi_blacklisted());
2116 }
2117 
2118 /*
2119  * Attempt to allocate *count MSI messages.  The actual number allocated is
2120  * returned in *count.  After this function returns, each message will be
2121  * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2122  */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];
	uint16_t ctrl;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    *count, cfg->msi.msi_msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	/*
	 * Retry loop: halve the request each time the parent bridge
	 * cannot supply that many vectors, until one vector fails.
	 */
	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    actual, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				printf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irqs[actual - 1]);
			printf(" for MSI\n");
		}
	}

	/* Update control register with actual count. */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	/* MME field encodes the message count as log2 (actual is 2^MME). */
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
2241 
2242 /* Release the MSI messages associated with this device. */
/* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int error, i, irqs[32];

	/* Try MSI-X first. */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
		return (error);

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	/* Collect the IRQ numbers while verifying nothing is in use. */
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
2290 
2291 /*
2292  * Return the max supported MSI messages this device supports.
2293  * Basically, assuming the MD code can alloc messages, this function
2294  * should return the maximum value that pci_alloc_msi() can return.
2295  * Thus, it is subject to the tunables, etc.
2296  */
2297 int
2298 pci_msi_count_method(device_t dev, device_t child)
2299 {
2300 	struct pci_devinfo *dinfo = device_get_ivars(child);
2301 	struct pcicfg_msi *msi = &dinfo->cfg.msi;
2302 
2303 	if (pci_do_msi && msi->msi_location != 0)
2304 		return (msi->msi_msgnum);
2305 	return (0);
2306 }
2307 
2308 /* free pcicfgregs structure and all depending data structures */
2309 
int
pci_freecfg(struct pci_devinfo *dinfo)
{
	struct devlist *devlist_head;
	struct pci_map *pm, *next;
	int i;

	devlist_head = &pci_devq;

	/* Free cached VPD data, if any was read at probe time. */
	if (dinfo->cfg.vpd.vpd_reg) {
		free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
			free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
		free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
		for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
			free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
		free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
	}
	/* Free the recorded BAR map entries (SAFE: we free as we walk). */
	STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
		free(pm, M_DEVBUF);
	}
	/* Unlink from the global device list before freeing. */
	STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
	free(dinfo, M_DEVBUF);

	/* increment the generation count */
	pci_generation++;

	/* we're losing one device */
	pci_numdevs--;
	return (0);
}
2341 
2342 /*
2343  * PCI power manangement
2344  */
2345 int
2346 pci_set_powerstate_method(device_t dev, device_t child, int state)
2347 {
2348 	struct pci_devinfo *dinfo = device_get_ivars(child);
2349 	pcicfgregs *cfg = &dinfo->cfg;
2350 	uint16_t status;
2351 	int result, oldstate, highest, delay;
2352 
2353 	if (cfg->pp.pp_cap == 0)
2354 		return (EOPNOTSUPP);
2355 
2356 	/*
2357 	 * Optimize a no state change request away.  While it would be OK to
2358 	 * write to the hardware in theory, some devices have shown odd
2359 	 * behavior when going from D3 -> D3.
2360 	 */
2361 	oldstate = pci_get_powerstate(child);
2362 	if (oldstate == state)
2363 		return (0);
2364 
2365 	/*
2366 	 * The PCI power management specification states that after a state
2367 	 * transition between PCI power states, system software must
2368 	 * guarantee a minimal delay before the function accesses the device.
2369 	 * Compute the worst case delay that we need to guarantee before we
2370 	 * access the device.  Many devices will be responsive much more
2371 	 * quickly than this delay, but there are some that don't respond
2372 	 * instantly to state changes.  Transitions to/from D3 state require
2373 	 * 10ms, while D2 requires 200us, and D0/1 require none.  The delay
2374 	 * is done below with DELAY rather than a sleeper function because
2375 	 * this function can be called from contexts where we cannot sleep.
2376 	 */
2377 	highest = (oldstate > state) ? oldstate : state;
2378 	if (highest == PCI_POWERSTATE_D3)
2379 	    delay = 10000;
2380 	else if (highest == PCI_POWERSTATE_D2)
2381 	    delay = 200;
2382 	else
2383 	    delay = 0;
2384 	status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2385 	    & ~PCIM_PSTAT_DMASK;
2386 	result = 0;
2387 	switch (state) {
2388 	case PCI_POWERSTATE_D0:
2389 		status |= PCIM_PSTAT_D0;
2390 		break;
2391 	case PCI_POWERSTATE_D1:
2392 		if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2393 			return (EOPNOTSUPP);
2394 		status |= PCIM_PSTAT_D1;
2395 		break;
2396 	case PCI_POWERSTATE_D2:
2397 		if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2398 			return (EOPNOTSUPP);
2399 		status |= PCIM_PSTAT_D2;
2400 		break;
2401 	case PCI_POWERSTATE_D3:
2402 		status |= PCIM_PSTAT_D3;
2403 		break;
2404 	default:
2405 		return (EINVAL);
2406 	}
2407 
2408 	if (bootverbose)
2409 		pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2410 		    state);
2411 
2412 	PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
2413 	if (delay)
2414 		DELAY(delay);
2415 	return (0);
2416 }
2417 
2418 int
2419 pci_get_powerstate_method(device_t dev, device_t child)
2420 {
2421 	struct pci_devinfo *dinfo = device_get_ivars(child);
2422 	pcicfgregs *cfg = &dinfo->cfg;
2423 	uint16_t status;
2424 	int result;
2425 
2426 	if (cfg->pp.pp_cap != 0) {
2427 		status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2428 		switch (status & PCIM_PSTAT_DMASK) {
2429 		case PCIM_PSTAT_D0:
2430 			result = PCI_POWERSTATE_D0;
2431 			break;
2432 		case PCIM_PSTAT_D1:
2433 			result = PCI_POWERSTATE_D1;
2434 			break;
2435 		case PCIM_PSTAT_D2:
2436 			result = PCI_POWERSTATE_D2;
2437 			break;
2438 		case PCIM_PSTAT_D3:
2439 			result = PCI_POWERSTATE_D3;
2440 			break;
2441 		default:
2442 			result = PCI_POWERSTATE_UNKNOWN;
2443 			break;
2444 		}
2445 	} else {
2446 		/* No support, device is always at D0 */
2447 		result = PCI_POWERSTATE_D0;
2448 	}
2449 	return (result);
2450 }
2451 
2452 /*
2453  * Some convenience functions for PCI device drivers.
2454  */
2455 
2456 static __inline void
2457 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2458 {
2459 	uint16_t	command;
2460 
2461 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2462 	command |= bit;
2463 	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2464 }
2465 
2466 static __inline void
2467 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2468 {
2469 	uint16_t	command;
2470 
2471 	command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2472 	command &= ~bit;
2473 	PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2474 }
2475 
/* Enable bus mastering for the child device; always succeeds. */
int
pci_enable_busmaster_method(device_t dev, device_t child)
{
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
2482 
/* Disable bus mastering for the child device; always succeeds. */
int
pci_disable_busmaster_method(device_t dev, device_t child)
{
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
2489 
2490 int
2491 pci_enable_io_method(device_t dev, device_t child, int space)
2492 {
2493 	uint16_t bit;
2494 
2495 	switch(space) {
2496 	case SYS_RES_IOPORT:
2497 		bit = PCIM_CMD_PORTEN;
2498 		break;
2499 	case SYS_RES_MEMORY:
2500 		bit = PCIM_CMD_MEMEN;
2501 		break;
2502 	default:
2503 		return (EINVAL);
2504 	}
2505 	pci_set_command_bit(dev, child, bit);
2506 	return (0);
2507 }
2508 
2509 int
2510 pci_disable_io_method(device_t dev, device_t child, int space)
2511 {
2512 	uint16_t bit;
2513 
2514 	switch(space) {
2515 	case SYS_RES_IOPORT:
2516 		bit = PCIM_CMD_PORTEN;
2517 		break;
2518 	case SYS_RES_MEMORY:
2519 		bit = PCIM_CMD_MEMEN;
2520 		break;
2521 	default:
2522 		return (EINVAL);
2523 	}
2524 	pci_clear_command_bit(dev, child, bit);
2525 	return (0);
2526 }
2527 
2528 /*
2529  * New style pci driver.  Parent device is either a pci-host-bridge or a
2530  * pci-pci-bridge.  Both kinds are represented by instances of pcib.
2531  */
2532 
/*
 * Dump a device's config-space identity, power, MSI and MSI-X
 * capabilities to the console.  Only active under bootverbose.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{

	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			printf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		/* Power management capability, if present. */
		if (cfg->pp.pp_cap) {
			uint16_t status;

			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			printf("\tpowerspec %d  supports D0%s%s D3  current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		/* MSI capability, if present. */
		if (cfg->msi.msi_location) {
			int ctrl;

			ctrl = cfg->msi.msi_ctrl;
			printf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		/* MSI-X capability, if present. */
		if (cfg->msix.msix_location) {
			printf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				printf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				printf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
	}
}
2589 
2590 static int
2591 pci_porten(device_t dev)
2592 {
2593 	return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
2594 }
2595 
2596 static int
2597 pci_memen(device_t dev)
2598 {
2599 	return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
2600 }
2601 
/*
 * Read a BAR's current value and size it by writing all 1's and
 * reading back which bits stick.  Returns the (possibly 64-bit) BAR
 * value in *mapp and the sizing read-back in *testvalp.  Decoding is
 * temporarily disabled via the command register while sizing, and the
 * original BAR contents are restored before returning.
 */
static void
pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
{
	struct pci_devinfo *dinfo;
	pci_addr_t map, testval;
	int ln2range;
	uint16_t cmd;

	/*
	 * The device ROM BAR is special.  It is always a 32-bit
	 * memory BAR.  Bit 0 is special and should not be set when
	 * sizing the BAR.
	 */
	dinfo = device_get_ivars(dev);
	if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
		map = pci_read_config(dev, reg, 4);
		pci_write_config(dev, reg, 0xfffffffe, 4);
		testval = pci_read_config(dev, reg, 4);
		pci_write_config(dev, reg, map, 4);
		*mapp = map;
		*testvalp = testval;
		return;
	}

	map = pci_read_config(dev, reg, 4);
	ln2range = pci_maprange(map);
	/* 64-bit BARs span two consecutive dwords. */
	if (ln2range == 64)
		map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;

	/*
	 * Disable decoding via the command register before
	 * determining the BAR's length since we will be placing it in
	 * a weird state.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_write_config(dev, PCIR_COMMAND,
	    cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);

	/*
	 * Determine the BAR's length by writing all 1's.  The bottom
	 * log_2(size) bits of the BAR will stick as 0 when we read
	 * the value back.
	 */
	pci_write_config(dev, reg, 0xffffffff, 4);
	testval = pci_read_config(dev, reg, 4);
	if (ln2range == 64) {
		pci_write_config(dev, reg + 4, 0xffffffff, 4);
		testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
	}

	/*
	 * Restore the original value of the BAR.  We may have reprogrammed
	 * the BAR of the low-level console device and when booting verbose,
	 * we need the console device addressable.
	 */
	pci_write_config(dev, reg, map, 4);
	if (ln2range == 64)
		pci_write_config(dev, reg + 4, map >> 32, 4);
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	*mapp = map;
	*testvalp = testval;
}
2665 
/*
 * Program a BAR with a new base address and refresh the cached
 * pm_value from the hardware (the device may not implement all bits).
 */
static void
pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
{
	struct pci_devinfo *dinfo;
	int ln2range;

	/* The device ROM BAR is always a 32-bit memory BAR. */
	dinfo = device_get_ivars(dev);
	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
		ln2range = 32;
	else
		ln2range = pci_maprange(pm->pm_value);
	pci_write_config(dev, pm->pm_reg, base, 4);
	if (ln2range == 64)
		pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
	/* Re-read so pm_value reflects what the device actually latched. */
	pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
	if (ln2range == 64)
		pm->pm_value |= (pci_addr_t)pci_read_config(dev,
		    pm->pm_reg + 4, 4) << 32;
}
2686 
2687 struct pci_map *
2688 pci_find_bar(device_t dev, int reg)
2689 {
2690 	struct pci_devinfo *dinfo;
2691 	struct pci_map *pm;
2692 
2693 	dinfo = device_get_ivars(dev);
2694 	STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2695 		if (pm->pm_reg == reg)
2696 			return (pm);
2697 	}
2698 	return (NULL);
2699 }
2700 
2701 int
2702 pci_bar_enabled(device_t dev, struct pci_map *pm)
2703 {
2704 	struct pci_devinfo *dinfo;
2705 	uint16_t cmd;
2706 
2707 	dinfo = device_get_ivars(dev);
2708 	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
2709 	    !(pm->pm_value & PCIM_BIOS_ENABLE))
2710 		return (0);
2711 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
2712 	if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
2713 		return ((cmd & PCIM_CMD_MEMEN) != 0);
2714 	else
2715 		return ((cmd & PCIM_CMD_PORTEN) != 0);
2716 }
2717 
/*
 * Record a BAR (register offset, raw value, log2 size) for the device,
 * keeping the per-device map list sorted by register offset.  Panics
 * (KASSERT) if the register is already recorded.
 */
static struct pci_map *
pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
{
	struct pci_devinfo *dinfo;
	struct pci_map *pm, *prev;

	dinfo = device_get_ivars(dev);
	pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
	pm->pm_reg = reg;
	pm->pm_value = value;
	pm->pm_size = size;
	/* Find the entry after which the new BAR keeps the list sorted. */
	STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
		KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
		    reg));
		if (STAILQ_NEXT(prev, pm_link) == NULL ||
		    STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
			break;
	}
	/* prev == NULL means the list was empty; insert at the tail. */
	if (prev != NULL)
		STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
	else
		STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
	return (pm);
}
2742 
2743 static void
2744 pci_restore_bars(device_t dev)
2745 {
2746 	struct pci_devinfo *dinfo;
2747 	struct pci_map *pm;
2748 	int ln2range;
2749 
2750 	dinfo = device_get_ivars(dev);
2751 	STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
2752 		if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
2753 			ln2range = 32;
2754 		else
2755 			ln2range = pci_maprange(pm->pm_value);
2756 		pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
2757 		if (ln2range == 64)
2758 			pci_write_config(dev, pm->pm_reg + 4,
2759 			    pm->pm_value >> 32, 4);
2760 	}
2761 }
2762 
2763 /*
2764  * Add a resource based on a pci map register. Return 1 if the map
2765  * register is a 32bit map register or 2 if it is a 64bit register.
2766  */
static int
pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
    int force, int prefetch)
{
	struct pci_map *pm;
	pci_addr_t base, map, testval;
	pci_addr_t start, end, count;
	int barlen, basezero, flags, maprange, mapsize, type;
	uint16_t cmd;
	struct resource *res;

	/*
	 * The BAR may already exist if the device is a CardBus card
	 * whose CIS is stored in this BAR.
	 */
	pm = pci_find_bar(dev, reg);
	if (pm != NULL) {
		maprange = pci_maprange(pm->pm_value);
		barlen = maprange == 64 ? 2 : 1;
		return (barlen);
	}

	/* Size the BAR; 'map' is the value, 'testval' the sizing read. */
	pci_read_bar(dev, reg, &map, &testval);
	if (PCI_BAR_MEM(map)) {
		type = SYS_RES_MEMORY;
		if (map & PCIM_BAR_MEM_PREFETCH)
			prefetch = 1;
	} else
		type = SYS_RES_IOPORT;
	mapsize = pci_mapsize(testval);
	base = pci_mapbase(map);
#ifdef __PCI_BAR_ZERO_VALID
	basezero = 0;
#else
	basezero = base == 0;
#endif
	maprange = pci_maprange(map);
	barlen = maprange == 64 ? 2 : 1;

	/*
	 * For I/O registers, if bottom bit is set, and the next bit up
	 * isn't clear, we know we have a BAR that doesn't conform to the
	 * spec, so ignore it.  Also, sanity check the size of the data
	 * areas to the type of memory involved.  Memory must be at least
	 * 16 bytes in size, while I/O ranges must be at least 4.
	 */
	if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
		return (barlen);
	if ((type == SYS_RES_MEMORY && mapsize < 4) ||
	    (type == SYS_RES_IOPORT && mapsize < 2))
		return (barlen);

	/* Save a record of this BAR. */
	pm = pci_add_bar(dev, reg, map, mapsize);
	if (bootverbose) {
		printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
		    reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
		if (type == SYS_RES_IOPORT && !pci_porten(dev))
			printf(", port disabled\n");
		else if (type == SYS_RES_MEMORY && !pci_memen(dev))
			printf(", memory disabled\n");
		else
			printf(", enabled\n");
	}

	/*
	 * If base is 0, then we have problems if this architecture does
	 * not allow that.  It is best to ignore such entries for the
	 * moment.  These will be allocated later if the driver specifically
	 * requests them.  However, some removable busses look better when
	 * all resources are allocated, so allow '0' to be overriden.
	 *
	 * Similarly treat maps whose values is the same as the test value
	 * read back.  These maps have had all f's written to them by the
	 * BIOS in an attempt to disable the resources.
	 */
	if (!force && (basezero || map == testval))
		return (barlen);
	/* BAR address wider than a u_long cannot be represented here. */
	if ((u_long)base != base) {
		device_printf(bus,
		    "pci%d:%d:%d:%d bar %#x too many address bits",
		    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev), reg);
		return (barlen);
	}

	/*
	 * This code theoretically does the right thing, but has
	 * undesirable side effects in some cases where peripherals
	 * respond oddly to having these bits enabled.  Let the user
	 * be able to turn them off (since pci_enable_io_modes is 1 by
	 * default).
	 */
	if (pci_enable_io_modes) {
		/* Turn on resources that have been left off by a lazy BIOS */
		if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_PORTEN;
			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
		}
		if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
			cmd = pci_read_config(dev, PCIR_COMMAND, 2);
			cmd |= PCIM_CMD_MEMEN;
			pci_write_config(dev, PCIR_COMMAND, cmd, 2);
		}
	} else {
		if (type == SYS_RES_IOPORT && !pci_porten(dev))
			return (barlen);
		if (type == SYS_RES_MEMORY && !pci_memen(dev))
			return (barlen);
	}

	count = (pci_addr_t)1 << mapsize;
	flags = RF_ALIGNMENT_LOG2(mapsize);
	if (prefetch)
		flags |= RF_PREFETCHABLE;
	if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
		start = 0;	/* Let the parent decide. */
		end = ~0ul;
	} else {
		start = base;
		end = base + count - 1;
	}
	resource_list_add(rl, type, reg, start, end, count);

	/*
	 * Try to allocate the resource for this BAR from our parent
	 * so that this resource range is already reserved.  The
	 * driver for this device will later inherit this resource in
	 * pci_alloc_resource().
	 */
	res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
	    flags);
	if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) {
		/*
		 * If the allocation fails, try to allocate a resource for
		 * this BAR using any available range.  The firmware felt
		 * it was important enough to assign a resource, so don't
		 * disable decoding if we can help it.
		 */
		resource_list_delete(rl, type, reg);
		resource_list_add(rl, type, reg, 0, ~0ul, count);
		res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul,
		    count, flags);
	}
	if (res == NULL) {
		/*
		 * If the allocation fails, delete the resource list entry
		 * and disable decoding for this device.
		 *
		 * If the driver requests this resource in the future,
		 * pci_reserve_map() will try to allocate a fresh
		 * resource range.
		 */
		resource_list_delete(rl, type, reg);
		pci_disable_io(dev, type);
		if (bootverbose)
			device_printf(bus,
			    "pci%d:%d:%d:%d bar %#x failed to allocate\n",
			    pci_get_domain(dev), pci_get_bus(dev),
			    pci_get_slot(dev), pci_get_function(dev), reg);
	} else {
		/* Reprogram the BAR with the address actually reserved. */
		start = rman_get_start(res);
		pci_write_bar(dev, pm, start);
	}
	return (barlen);
}
2934 
2935 /*
2936  * For ATA devices we need to decide early what addressing mode to use.
2937  * Legacy demands that the primary and secondary ATA ports sits on the
2938  * same addresses that old ISA hardware did. This dictates that we use
2939  * those addresses and ignore the BAR's if we cannot set PCI native
2940  * addressing mode.
2941  */
2942 static void
2943 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
2944     uint32_t prefetchmask)
2945 {
2946 	struct resource *r;
2947 	int rid, type, progif;
2948 #if 0
2949 	/* if this device supports PCI native addressing use it */
2950 	progif = pci_read_config(dev, PCIR_PROGIF, 1);
2951 	if ((progif & 0x8a) == 0x8a) {
2952 		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
2953 		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
2954 			printf("Trying ATA native PCI addressing mode\n");
2955 			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
2956 		}
2957 	}
2958 #endif
2959 	progif = pci_read_config(dev, PCIR_PROGIF, 1);
2960 	type = SYS_RES_IOPORT;
2961 	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
2962 		pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
2963 		    prefetchmask & (1 << 0));
2964 		pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
2965 		    prefetchmask & (1 << 1));
2966 	} else {
2967 		rid = PCIR_BAR(0);
2968 		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
2969 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
2970 		    0x1f7, 8, 0);
2971 		rid = PCIR_BAR(1);
2972 		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
2973 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
2974 		    0x3f6, 1, 0);
2975 	}
2976 	if (progif & PCIP_STORAGE_IDE_MODESEC) {
2977 		pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
2978 		    prefetchmask & (1 << 2));
2979 		pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
2980 		    prefetchmask & (1 << 3));
2981 	} else {
2982 		rid = PCIR_BAR(2);
2983 		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
2984 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
2985 		    0x177, 8, 0);
2986 		rid = PCIR_BAR(3);
2987 		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
2988 		r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
2989 		    0x376, 1, 0);
2990 	}
2991 	pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
2992 	    prefetchmask & (1 << 4));
2993 	pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
2994 	    prefetchmask & (1 << 5));
2995 }
2996 
/*
 * Determine the IRQ routed to this device's INTx pin and record it as
 * the rid 0 SYS_RES_IRQ resource, updating the intline config register
 * if necessary.  A user tunable may override the routed value.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	snprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	/* Discard tunable values outside the usable range (1..254). */
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		/* Fall back to whatever the firmware left in intline. */
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
}
3044 
3045 /* Perform early OHCI takeover from SMM. */
3046 static void
3047 ohci_early_takeover(device_t self)
3048 {
3049 	struct resource *res;
3050 	uint32_t ctl;
3051 	int rid;
3052 	int i;
3053 
3054 	rid = PCIR_BAR(0);
3055 	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3056 	if (res == NULL)
3057 		return;
3058 
3059 	ctl = bus_read_4(res, OHCI_CONTROL);
3060 	if (ctl & OHCI_IR) {
3061 		if (bootverbose)
3062 			printf("ohci early: "
3063 			    "SMM active, request owner change\n");
3064 		bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3065 		for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3066 			DELAY(1000);
3067 			ctl = bus_read_4(res, OHCI_CONTROL);
3068 		}
3069 		if (ctl & OHCI_IR) {
3070 			if (bootverbose)
3071 				printf("ohci early: "
3072 				    "SMM does not respond, resetting\n");
3073 			bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3074 		}
3075 		/* Disable interrupts */
3076 		bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3077 	}
3078 
3079 	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3080 }
3081 
3082 /* Perform early UHCI takeover from SMM. */
3083 static void
3084 uhci_early_takeover(device_t self)
3085 {
3086 	struct resource *res;
3087 	int rid;
3088 
3089 	/*
3090 	 * Set the PIRQD enable bit and switch off all the others. We don't
3091 	 * want legacy support to interfere with us XXX Does this also mean
3092 	 * that the BIOS won't touch the keyboard anymore if it is connected
3093 	 * to the ports of the root hub?
3094 	 */
3095 	pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3096 
3097 	/* Disable interrupts */
3098 	rid = PCI_UHCI_BASE_REG;
3099 	res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3100 	if (res != NULL) {
3101 		bus_write_2(res, UHCI_INTR, 0);
3102 		bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3103 	}
3104 }
3105 
/* Perform early EHCI takeover from SMM. */
static void
ehci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t cparams;
	uint32_t eec;
	uint8_t eecp;
	uint8_t bios_sem;
	uint8_t offs;
	int rid;
	int i;

	/* Map the controller's register window (BAR 0). */
	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	cparams = bus_read_4(res, EHCI_HCCPARAMS);

	/* Synchronise with the BIOS if it owns the controller. */
	for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
	    eecp = EHCI_EECP_NEXT(eec)) {
		eec = pci_read_config(self, eecp, 4);
		/* Only the USB legacy-support capability is of interest. */
		if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
			continue;
		}
		/* A non-zero BIOS semaphore means the BIOS owns the HC. */
		bios_sem = pci_read_config(self, eecp +
		    EHCI_LEGSUP_BIOS_SEM, 1);
		if (bios_sem == 0) {
			continue;
		}
		if (bootverbose)
			printf("ehci early: "
			    "SMM active, request owner change\n");

		/* Request ownership by setting the OS semaphore. */
		pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);

		/* Poll up to ~100ms for the BIOS to release ownership. */
		for (i = 0; (i < 100) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = pci_read_config(self, eecp +
			    EHCI_LEGSUP_BIOS_SEM, 1);
		}

		if (bios_sem != 0) {
			if (bootverbose)
				printf("ehci early: "
				    "SMM does not respond\n");
		}
		/* Disable interrupts */
		offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
		bus_write_4(res, offs + EHCI_USBINTR, 0);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
3161 
/* Perform early XHCI takeover from SMM. */
static void
xhci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t cparams;
	uint32_t eec;
	uint8_t eecp;
	uint8_t bios_sem;
	uint8_t offs;
	int rid;
	int i;

	/* Map the controller's register window (BAR 0). */
	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	cparams = bus_read_4(res, XHCI_HCSPARAMS0);

	/* Primed so the first XHCI_XECP_NEXT(eec) loop test passes. */
	eec = -1;

	/* Synchronise with the BIOS if it owns the controller. */
	for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
	    eecp += XHCI_XECP_NEXT(eec) << 2) {
		eec = bus_read_4(res, eecp);

		/* Only the USB legacy-support capability is of interest. */
		if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
			continue;

		/* A non-zero BIOS semaphore means the BIOS owns the HC. */
		bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
		if (bios_sem == 0)
			continue;

		if (bootverbose)
			printf("xhci early: "
			    "SMM active, request owner change\n");

		/* Request ownership by setting the OS semaphore. */
		bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);

		/* wait a maximum of 5 seconds */

		for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = bus_read_1(res, eecp +
			    XHCI_XECP_BIOS_SEM);
		}

		if (bios_sem != 0) {
			if (bootverbose)
				printf("xhci early: "
				    "SMM does not respond\n");
		}

		/* Disable interrupts */
		offs = bus_read_1(res, XHCI_CAPLENGTH);
		bus_write_4(res, offs + XHCI_USBCMD, 0);
		/* NOTE(review): read-back presumably flushes the write. */
		bus_read_4(res, offs + XHCI_USBSTS);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
3223 
3224 #if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
 * Reserve the bus number range decoded by a bridge (secbus..subbus)
 * from our parent, applying quirks for known-broken devices.  If the
 * range is invalid or cannot be reserved, the secondary and
 * subordinate bus registers are cleared so the bridge gets renumbered
 * later.
 */
static void
pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
    struct resource_list *rl)
{
	struct resource *res;
	char *cp;
	u_long start, end, count;
	int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;

	/* Locate the secondary/subordinate registers by header type. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		/* Not a bridge header: nothing to reserve. */
		return;
	}

	/*
	 * If the existing bus range is valid, attempt to reserve it
	 * from our parent.  If this fails for any reason, clear the
	 * secbus and subbus registers.
	 *
	 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
	 * This would at least preserve the existing sec_bus if it is
	 * valid.
	 */
	sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
	sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);

	/* Quirk handling. */
	switch (pci_get_devid(dev)) {
	case 0x12258086:		/* Intel 82454KX/GX (Orion) */
		sup_bus = pci_read_config(dev, 0x41, 1);
		if (sup_bus != 0xff) {
			sec_bus = sup_bus + 1;
			sub_bus = sup_bus + 1;
			PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
			PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
		}
		break;

	case 0x00dd10de:
		/* Compaq R3000 BIOS sets wrong subordinate bus number. */
		if ((cp = getenv("smbios.planar.maker")) == NULL)
			break;
		if (strncmp(cp, "Compal", 6) != 0) {
			freeenv(cp);
			break;
		}
		freeenv(cp);
		if ((cp = getenv("smbios.planar.product")) == NULL)
			break;
		if (strncmp(cp, "08A0", 4) != 0) {
			freeenv(cp);
			break;
		}
		freeenv(cp);
		if (sub_bus < 0xa) {
			/* Grow the subordinate bus to cover bus 0xa. */
			sub_bus = 0xa;
			PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
		}
		break;
	}

	if (bootverbose)
		printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
	if (sec_bus > 0 && sub_bus >= sec_bus) {
		start = sec_bus;
		end = sub_bus;
		count = end - start + 1;

		resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count);

		/*
		 * If requested, clear secondary bus registers in
		 * bridge devices to force a complete renumbering
		 * rather than reserving the existing range.  However,
		 * preserve the existing size.
		 */
		if (pci_clear_buses)
			goto clear;

		rid = 0;
		res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
		    start, end, count, 0);
		if (res != NULL)
			return;

		if (bootverbose)
			device_printf(bus,
			    "pci%d:%d:%d:%d secbus failed to allocate\n",
			    pci_get_domain(dev), pci_get_bus(dev),
			    pci_get_slot(dev), pci_get_function(dev));
	}

	/* Reached by fall-through on invalid range or failed reservation. */
clear:
	PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
	PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
}
3329 
/*
 * Allocate the rid 0 PCI_RES_BUS range for a bridge child.  If the
 * range was not reserved earlier, lazily reserve it now and program the
 * bridge's secondary/subordinate bus registers to match the reserved
 * range.  Returns NULL for non-bridge children or rids other than 0.
 */
static struct resource *
pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start,
    u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	struct resource_list *rl;
	struct resource *res;
	int sec_reg, sub_reg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;
	rl = &dinfo->resources;
	/* Only bridge-type headers carry bus number registers. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		return (NULL);
	}

	/* Only rid 0 is supported for bus number ranges. */
	if (*rid != 0)
		return (NULL);

	if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
		resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
	if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
		/* Reserve without activating; activation happens below. */
		res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
		    start, end, count, flags & ~RF_ACTIVE);
		if (res == NULL) {
			resource_list_delete(rl, PCI_RES_BUS, *rid);
			device_printf(child, "allocating %lu bus%s failed\n",
			    count, count == 1 ? "" : "es");
			return (NULL);
		}
		if (bootverbose)
			device_printf(child,
			    "Lazy allocation of %lu bus%s at %lu\n", count,
			    count == 1 ? "" : "es", rman_get_start(res));
		/* Program the bridge to decode the reserved range. */
		PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
		PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
	}
	return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
	    end, count, flags));
}
3380 #endif
3381 
/*
 * Populate a new child's resource list: BARs (with ATA legacy and
 * quirk handling), the INTx interrupt, early USB SMM takeover, and
 * (when enabled) the secondary bus range for bridges.
 */
void
pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;
	struct resource_list *rl;
	const struct pci_quirk *q;
	uint32_t devid;
	int i;

	dinfo = device_get_ivars(dev);
	cfg = &dinfo->cfg;
	rl = &dinfo->resources;
	devid = (cfg->device << 16) | cfg->vendor;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(bus, dev, rl, force, prefetchmask);
	else
		for (i = 0; i < cfg->nummaps;) {
			/*
			 * Skip quirked resources.
			 */
			for (q = &pci_quirks[0]; q->devid != 0; q++)
				if (q->devid == devid &&
				    q->type == PCI_QUIRK_UNMAP_REG &&
				    q->arg1 == PCIR_BAR(i))
					break;
			if (q->devid != 0) {
				i++;
				continue;
			}
			/* Advance by the number of BAR registers consumed. */
			i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
			    prefetchmask & (1 << i));
		}

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid != 0; q++)
		if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(bus, dev, q->arg1, rl, force, 0);

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#ifdef __PCI_REROUTE_INTERRUPT
		/*
		 * Try to re-route interrupts. Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
#else
		pci_assign_interrupt(bus, dev, 0);
#endif
	}

	/* Take USB host controllers away from SMM firmware early. */
	if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
		if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
			xhci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
			ehci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
			ohci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
			uhci_early_takeover(dev);
	}

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	/*
	 * Reserve resources for secondary bus ranges behind bridge
	 * devices.
	 */
	pci_reserve_secbus(bus, dev, cfg, rl);
#endif
}
3463 
3464 static struct pci_devinfo *
3465 pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
3466     int slot, int func, size_t dinfo_size)
3467 {
3468 	struct pci_devinfo *dinfo;
3469 
3470 	dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size);
3471 	if (dinfo != NULL)
3472 		pci_add_child(dev, dinfo);
3473 
3474 	return (dinfo);
3475 }
3476 
/*
 * Enumerate every slot and function on the given bus and add a child
 * device for each function found.
 */
void
pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;
	int first_func;

	/*
	 * Try to detect a device at slot 0, function 0.  If it exists, try to
	 * enable ARI.  We must enable ARI before detecting the rest of the
	 * functions on this bus as ARI changes the set of slots and functions
	 * that are legal on this bus.
	 */
	dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0,
	    dinfo_size);
	if (dinfo != NULL && pci_enable_ari)
		PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);

	/*
	 * Start looking for new devices on slot 0 at function 1 because we
	 * just identified the device at slot 0, function 0.
	 */
	first_func = 1;

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	/* first_func drops back to 0 after the slot 0 iteration. */
	for (s = 0; s <= maxslots; s++, first_func = 0) {
		pcifunchigh = 0;
		f = 0;
		DELAY(1);
		/* Skip slots whose function 0 has a bogus header type. */
		hdrtype = REG(PCIR_HDRTYPE, 1);
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Scan functions past 0 only on multi-function devices. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCIB_MAXFUNCS(pcib);
		for (f = first_func; f <= pcifunchigh; f++)
			pci_identify_function(pcib, dev, domain, busno, s, f,
			    dinfo_size);
	}
#undef REG
}
3523 
3524 void
3525 pci_add_child(device_t bus, struct pci_devinfo *dinfo)
3526 {
3527 	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
3528 	device_set_ivars(dinfo->cfg.dev, dinfo);
3529 	resource_list_init(&dinfo->resources);
3530 	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
3531 	pci_cfg_restore(dinfo->cfg.dev, dinfo);
3532 	pci_print_verbose(dinfo);
3533 	pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
3534 	pci_child_added(dinfo->cfg.dev);
3535 }
3536 
/*
 * Default implementation of the child-added bus method: deliberately
 * a no-op.
 */
void
pci_child_added_method(device_t dev, device_t child)
{

}
3542 
/* Probe method for the generic PCI bus driver. */
static int
pci_probe(device_t dev)
{

	device_set_desc(dev, "PCI bus");

	/* Allow other subclasses to override this driver. */
	return (BUS_PROBE_GENERIC);
}
3552 
/*
 * Attach-time setup shared by pci and its subclasses: reserve this
 * bus's own bus number from the parent (when bus-number resources are
 * enabled) and set up the DMA tag handed to child devices.
 */
int
pci_attach_common(device_t dev)
{
	struct pci_softc *sc;
	int busno, domain;
#ifdef PCI_DMA_BOUNDARY
	int error, tag_valid;
#endif
#ifdef PCI_RES_BUS
	int rid;
#endif

	sc = device_get_softc(dev);
	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
#ifdef PCI_RES_BUS
	/* Reserve our own bus number from the parent bridge. */
	rid = 0;
	sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
	    1, 0);
	if (sc->sc_bus == NULL) {
		device_printf(dev, "failed to allocate bus number\n");
		return (ENXIO);
	}
#endif
	if (bootverbose)
		device_printf(dev, "domain=%d, physical bus=%d\n",
		    domain, busno);
#ifdef PCI_DMA_BOUNDARY
	tag_valid = 0;
	/*
	 * Create a boundary-restricted tag only when our grandparent
	 * is not itself a PCI bus; nested buses fall through and
	 * inherit their parent's tag instead.
	 */
	if (device_get_devclass(device_get_parent(device_get_parent(dev))) !=
	    devclass_find("pci")) {
		error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
		    PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
		    NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
		    BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag);
		if (error)
			device_printf(dev, "Failed to create DMA tag: %d\n",
			    error);
		else
			tag_valid = 1;
	}
	if (!tag_valid)
#endif
		sc->sc_dma_tag = bus_get_dma_tag(dev);
	return (0);
}
3599 
/* Attach method for the generic PCI bus driver. */
static int
pci_attach(device_t dev)
{
	int busno, domain, error;

	error = pci_attach_common(dev);
	if (error)
		return (error);

	/*
	 * Since there can be multiple independently numbered PCI
	 * busses on systems with multiple PCI domains, we can't use
	 * the unit number to decide which bus we are probing. We ask
	 * the parent pcib what our domain and bus numbers are.
	 */
	domain = pcib_get_domain(dev);
	busno = pcib_get_bus(dev);
	/* Enumerate children, then let their drivers attach. */
	pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo));
	return (bus_generic_attach(dev));
}
3620 
#ifdef PCI_RES_BUS
/*
 * Detach method: detach all children first, then hand our reserved
 * bus number back to the parent bridge.
 */
static int
pci_detach(device_t dev)
{
	struct pci_softc *sc;
	int rv;

	rv = bus_generic_detach(dev);
	if (rv != 0)
		return (rv);
	sc = device_get_softc(dev);
	return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus));
}
#endif
3635 
3636 static void
3637 pci_set_power_child(device_t dev, device_t child, int state)
3638 {
3639 	struct pci_devinfo *dinfo;
3640 	device_t pcib;
3641 	int dstate;
3642 
3643 	/*
3644 	 * Set the device to the given state.  If the firmware suggests
3645 	 * a different power state, use it instead.  If power management
3646 	 * is not present, the firmware is responsible for managing
3647 	 * device power.  Skip children who aren't attached since they
3648 	 * are handled separately.
3649 	 */
3650 	pcib = device_get_parent(dev);
3651 	dinfo = device_get_ivars(child);
3652 	dstate = state;
3653 	if (device_is_attached(child) &&
3654 	    PCIB_POWER_FOR_SLEEP(pcib, dev, &dstate) == 0)
3655 		pci_set_powerstate(child, dstate);
3656 }
3657 
3658 int
3659 pci_suspend_child(device_t dev, device_t child)
3660 {
3661 	struct pci_devinfo *dinfo;
3662 	int error;
3663 
3664 	dinfo = device_get_ivars(child);
3665 
3666 	/*
3667 	 * Save the PCI configuration space for the child and set the
3668 	 * device in the appropriate power state for this sleep state.
3669 	 */
3670 	pci_cfg_save(child, dinfo, 0);
3671 
3672 	/* Suspend devices before potentially powering them down. */
3673 	error = bus_generic_suspend_child(dev, child);
3674 
3675 	if (error)
3676 		return (error);
3677 
3678 	if (pci_do_power_suspend)
3679 		pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
3680 
3681 	return (0);
3682 }
3683 
3684 int
3685 pci_resume_child(device_t dev, device_t child)
3686 {
3687 	struct pci_devinfo *dinfo;
3688 
3689 	if (pci_do_power_resume)
3690 		pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
3691 
3692 	dinfo = device_get_ivars(child);
3693 	pci_cfg_restore(child, dinfo);
3694 	if (!device_is_attached(child))
3695 		pci_cfg_save(child, dinfo, 1);
3696 
3697 	bus_generic_resume_child(dev, child);
3698 
3699 	return (0);
3700 }
3701 
3702 int
3703 pci_resume(device_t dev)
3704 {
3705 	device_t child, *devlist;
3706 	int error, i, numdevs;
3707 
3708 	if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
3709 		return (error);
3710 
3711 	/*
3712 	 * Resume critical devices first, then everything else later.
3713 	 */
3714 	for (i = 0; i < numdevs; i++) {
3715 		child = devlist[i];
3716 		switch (pci_get_class(child)) {
3717 		case PCIC_DISPLAY:
3718 		case PCIC_MEMORY:
3719 		case PCIC_BRIDGE:
3720 		case PCIC_BASEPERIPH:
3721 			BUS_RESUME_CHILD(dev, child);
3722 			break;
3723 		}
3724 	}
3725 	for (i = 0; i < numdevs; i++) {
3726 		child = devlist[i];
3727 		switch (pci_get_class(child)) {
3728 		case PCIC_DISPLAY:
3729 		case PCIC_MEMORY:
3730 		case PCIC_BRIDGE:
3731 		case PCIC_BASEPERIPH:
3732 			break;
3733 		default:
3734 			BUS_RESUME_CHILD(dev, child);
3735 		}
3736 	}
3737 	free(devlist, M_TEMP);
3738 	return (0);
3739 }
3740 
/*
 * Locate the preloaded PCI vendor data module, if the loader supplied
 * one, and point the global vendor database pointers at it.
 */
static void
pci_load_vendor_data(void)
{
	caddr_t data;
	void *ptr;
	size_t sz;

	data = preload_search_by_type("pci_vendor_data");
	if (data != NULL) {
		ptr = preload_fetch_addr(data);
		sz = preload_fetch_size(data);
		if (ptr != NULL && sz != 0) {
			pci_vendordata = ptr;
			pci_vendordata_size = sz;
			/* terminate the database */
			/*
			 * NOTE(review): this stores one byte at index
			 * pci_vendordata_size, i.e. just past the
			 * reported size; assumes the preload area
			 * leaves room for the terminator -- confirm.
			 */
			pci_vendordata[pci_vendordata_size] = '\n';
		}
	}
}
3760 
3761 void
3762 pci_driver_added(device_t dev, driver_t *driver)
3763 {
3764 	int numdevs;
3765 	device_t *devlist;
3766 	device_t child;
3767 	struct pci_devinfo *dinfo;
3768 	int i;
3769 
3770 	if (bootverbose)
3771 		device_printf(dev, "driver added\n");
3772 	DEVICE_IDENTIFY(driver, dev);
3773 	if (device_get_children(dev, &devlist, &numdevs) != 0)
3774 		return;
3775 	for (i = 0; i < numdevs; i++) {
3776 		child = devlist[i];
3777 		if (device_get_state(child) != DS_NOTPRESENT)
3778 			continue;
3779 		dinfo = device_get_ivars(child);
3780 		pci_print_verbose(dinfo);
3781 		if (bootverbose)
3782 			pci_printf(&dinfo->cfg, "reprobing on driver added\n");
3783 		pci_cfg_restore(child, dinfo);
3784 		if (device_probe_and_attach(child) != 0)
3785 			pci_child_detached(dev, child);
3786 	}
3787 	free(devlist, M_TEMP);
3788 }
3789 
/*
 * Bus method for hooking up a child's interrupt handler.  Beyond the
 * generic setup this enables INTx, MSI or MSI-X delivery as
 * appropriate, lazily asking the parent bridge to map MSI/MSI-X
 * vectors on first use.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
{
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	void *cookie;
	int error, rid;

	error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
	    arg, &cookie);
	if (error)
		return (error);

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != dev) {
		*cookiep = cookie;
		return(0);
	}

	/* rid 0 is the legacy INTx interrupt; rids > 0 are MSI/MSI-X. */
	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Make sure that INTx is enabled */
		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.
		 * Ask our parent to map the MSI and give
		 * us the address and data register values.
		 * If we fail for some reason, teardown the
		 * interrupt handler.
		 */
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* Map the MSI vector once, on first handler setup. */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
			    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
			}
			/* First handler enables MSI in config space. */
			if (dinfo->cfg.msi.msi_handlers == 0)
				pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
				    dinfo->cfg.msi.msi_data);
			dinfo->cfg.msi.msi_handlers++;
		} else {
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			/* MSI-X rids are 1-based table indices. */
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			/* Map the MSI-X vector once, on first handler setup. */
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
		    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}
			/* First handler programs and unmasks the entry. */
			if (mte->mte_handlers == 0) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
			mte->mte_handlers++;
		}

		if (!pci_has_quirk(pci_get_devid(dev),
		    PCI_QUIRK_MSI_INTX_BUG)) {
			/*
			 * Make sure that INTx is disabled if we are
			 * using MSI/MSIX
			 */
			pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
		}
		/* Fallen into on success as well; error is 0 then. */
	bad:
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
	*cookiep = cookie;
	return (0);
}
3887 
/*
 * Bus method for tearing down a child's interrupt handler: undoes
 * pci_setup_intr() by masking INTx or dropping the MSI/MSI-X handler
 * reference count (disabling/masking the message when it reaches 0).
 */
int
pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	struct msix_table_entry *mte;
	struct resource_list_entry *rle;
	struct pci_devinfo *dinfo;
	int error, rid;

	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
		return (EINVAL);

	/* If this isn't a direct child, just bail out */
	if (device_get_parent(child) != dev)
		return(bus_generic_teardown_intr(dev, child, irq, cookie));

	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Mask INTx */
		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.  If so,
		 * decrement the appropriate handlers count and mask the
		 * MSI-X message, or disable MSI messages if the count
		 * drops to 0.
		 */
		dinfo = device_get_ivars(child);
		/*
		 * NOTE(review): assumes an rid > 0 IRQ always has a
		 * resource list entry; rle is dereferenced unchecked.
		 */
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
		if (rle->res != irq)
			return (EINVAL);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* NOTE(review): message says MSI-X in the MSI path. */
			KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
			    ("MSI-X index too high"));
			if (dinfo->cfg.msi.msi_handlers == 0)
				return (EINVAL);
			dinfo->cfg.msi.msi_handlers--;
			if (dinfo->cfg.msi.msi_handlers == 0)
				pci_disable_msi(child);
		} else {
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			if (mte->mte_handlers == 0)
				return (EINVAL);
			mte->mte_handlers--;
			/* Mask the entry once its last handler is gone. */
			if (mte->mte_handlers == 0)
				pci_mask_msix(child, rid - 1);
		}
	}
	error = bus_generic_teardown_intr(dev, child, irq, cookie);
	if (rid > 0)
		KASSERT(error == 0,
		    ("%s: generic teardown failed for MSI/MSI-X", __func__));
	return (error);
}
3946 
3947 int
3948 pci_print_child(device_t dev, device_t child)
3949 {
3950 	struct pci_devinfo *dinfo;
3951 	struct resource_list *rl;
3952 	int retval = 0;
3953 
3954 	dinfo = device_get_ivars(child);
3955 	rl = &dinfo->resources;
3956 
3957 	retval += bus_print_child_header(dev, child);
3958 
3959 	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
3960 	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
3961 	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
3962 	if (device_get_flags(dev))
3963 		retval += printf(" flags %#x", device_get_flags(dev));
3964 
3965 	retval += printf(" at device %d.%d", pci_get_slot(child),
3966 	    pci_get_function(child));
3967 
3968 	retval += bus_print_child_domain(dev, child);
3969 	retval += bus_print_child_footer(dev, child);
3970 
3971 	return (retval);
3972 }
3973 
/*
 * Table of generic class/subclass descriptions used by
 * pci_probe_nomatch() to describe devices no driver attached to.
 * A subclass of -1 supplies the fallback description for the whole
 * class; a matching specific subclass entry overrides it.  The table
 * is terminated by the all-zero sentinel entry (desc == NULL).
 */
static const struct
{
	int		class;
	int		subclass;
	int		report; /* 0 = bootverbose, 1 = always */
	const char	*desc;
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			1, "old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	1, "non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		1, "VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			1, "mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	1, "SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	1, "ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	1, "floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	1, "IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	1, "RAID"},
	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	1, "ATA (ADMA)"},
	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	1, "SATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	1, "SAS"},
	{PCIC_STORAGE,		PCIS_STORAGE_NVM,	1, "NVM"},
	{PCIC_NETWORK,		-1,			1, "network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	1, "ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	1, "token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	1, "fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	1, "ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	1, "ISDN"},
	{PCIC_DISPLAY,		-1,			1, "display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	1, "VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	1, "XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	1, "3D"},
	{PCIC_MULTIMEDIA,	-1,			1, "multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	1, "video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	1, "audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	1, "telephony"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	1, "HDA"},
	{PCIC_MEMORY,		-1,			1, "memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	1, "RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	1, "flash"},
	{PCIC_BRIDGE,		-1,			1, "bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	1, "HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	1, "PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	1, "PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	1, "PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	1, "PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	1, "PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	1, "PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	1, "PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	1, "PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			1, "simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	1, "UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	1, "parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	1, "multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	1, "generic modem"},
	{PCIC_BASEPERIPH,	-1,			0, "base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	1, "interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	1, "DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	1, "timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	1, "realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	1, "PCI hot-plug controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	1, "SD host controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_IOMMU,	1, "IOMMU"},
	{PCIC_INPUTDEV,		-1,			1, "input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	1, "keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	1, "mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	1, "scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	1, "gameport"},
	{PCIC_DOCKING,		-1,			1, "docking station"},
	{PCIC_PROCESSOR,	-1,			1, "processor"},
	{PCIC_SERIALBUS,	-1,			1, "serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	1, "FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	1, "AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	1, "SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	1, "USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	1, "Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	0, "SMBus"},
	{PCIC_WIRELESS,		-1,			1, "wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	1, "iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	1, "IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	1, "RF"},
	{PCIC_INTELLIIO,	-1,			1, "intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	1, "I2O"},
	{PCIC_SATCOM,		-1,			1, "satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		1, "sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	1, "sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	1, "sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	1, "sat data"},
	{PCIC_CRYPTO,		-1,			1, "encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	1, "network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	1, "entertainment crypto"},
	{PCIC_DASP,		-1,			0, "dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		1, "DPIO module"},
	{0, 0, 0,		NULL}
};
4068 
4069 void
4070 pci_probe_nomatch(device_t dev, device_t child)
4071 {
4072 	int i, report;
4073 	const char *cp, *scp;
4074 	char *device;
4075 
4076 	/*
4077 	 * Look for a listing for this device in a loaded device database.
4078 	 */
4079 	report = 1;
4080 	if ((device = pci_describe_device(child)) != NULL) {
4081 		device_printf(dev, "<%s>", device);
4082 		free(device, M_DEVBUF);
4083 	} else {
4084 		/*
4085 		 * Scan the class/subclass descriptions for a general
4086 		 * description.
4087 		 */
4088 		cp = "unknown";
4089 		scp = NULL;
4090 		for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
4091 			if (pci_nomatch_tab[i].class == pci_get_class(child)) {
4092 				if (pci_nomatch_tab[i].subclass == -1) {
4093 					cp = pci_nomatch_tab[i].desc;
4094 					report = pci_nomatch_tab[i].report;
4095 				} else if (pci_nomatch_tab[i].subclass ==
4096 				    pci_get_subclass(child)) {
4097 					scp = pci_nomatch_tab[i].desc;
4098 					report = pci_nomatch_tab[i].report;
4099 				}
4100 			}
4101 		}
4102 		if (report || bootverbose) {
4103 			device_printf(dev, "<%s%s%s>",
4104 			    cp ? cp : "",
4105 			    ((cp != NULL) && (scp != NULL)) ? ", " : "",
4106 			    scp ? scp : "");
4107 		}
4108 	}
4109 	if (report || bootverbose) {
4110 		printf(" at device %d.%d (no driver attached)\n",
4111 		    pci_get_slot(child), pci_get_function(child));
4112 	}
4113 	pci_cfg_save(child, device_get_ivars(child), 1);
4114 }
4115 
/*
 * Bus hook run after a child's driver detaches.  Release anything the
 * driver leaked (complaining as we go), then save the device's config
 * registers so they can be restored if it is powered down/up later.
 */
void
pci_child_detached(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	/*
	 * Have to deallocate IRQs before releasing any MSI messages and
	 * have to release MSI messages before deallocating any memory
	 * BARs.
	 */
	if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
		pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
	if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
		pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
		(void)pci_release_msi(child);
	}
	if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
		pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
	if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
		pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
#ifdef PCI_RES_BUS
	if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
		pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
#endif

	pci_cfg_save(child, dinfo, 1);
}
4147 
4148 /*
4149  * Parse the PCI device database, if loaded, and return a pointer to a
4150  * description of the device.
4151  *
4152  * The database is flat text formatted as follows:
4153  *
4154  * Any line not in a valid format is ignored.
4155  * Lines are terminated with newline '\n' characters.
4156  *
4157  * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
4158  * the vendor name.
4159  *
4160  * A DEVICE line is entered immediately below the corresponding VENDOR ID.
4161  * - devices cannot be listed without a corresponding VENDOR line.
4162  * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
4163  * another TAB, then the device name.
4164  */
4165 
4166 /*
4167  * Assuming (ptr) points to the beginning of a line in the database,
4168  * return the vendor or device and description of the next entry.
4169  * The value of (vendor) or (device) inappropriate for the entry type
4170  * is set to -1.  Returns nonzero at the end of the database.
4171  *
4172  * Note that this is slightly unrobust in the face of corrupt data;
4173  * we attempt to safeguard against this by spamming the end of the
4174  * database with a newline when we initialise.
4175  */
4176 static int
4177 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
4178 {
4179 	char	*cp = *ptr;
4180 	int	left;
4181 
4182 	*device = -1;
4183 	*vendor = -1;
4184 	**desc = '\0';
4185 	for (;;) {
4186 		left = pci_vendordata_size - (cp - pci_vendordata);
4187 		if (left <= 0) {
4188 			*ptr = cp;
4189 			return(1);
4190 		}
4191 
4192 		/* vendor entry? */
4193 		if (*cp != '\t' &&
4194 		    sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
4195 			break;
4196 		/* device entry? */
4197 		if (*cp == '\t' &&
4198 		    sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
4199 			break;
4200 
4201 		/* skip to next line */
4202 		while (*cp != '\n' && left > 0) {
4203 			cp++;
4204 			left--;
4205 		}
4206 		if (*cp == '\n') {
4207 			cp++;
4208 			left--;
4209 		}
4210 	}
4211 	/* skip to next line */
4212 	while (*cp != '\n' && left > 0) {
4213 		cp++;
4214 		left--;
4215 	}
4216 	if (*cp == '\n' && left > 0)
4217 		cp++;
4218 	*ptr = cp;
4219 	return(0);
4220 }
4221 
4222 static char *
4223 pci_describe_device(device_t dev)
4224 {
4225 	int	vendor, device;
4226 	char	*desc, *vp, *dp, *line;
4227 
4228 	desc = vp = dp = NULL;
4229 
4230 	/*
4231 	 * If we have no vendor data, we can't do anything.
4232 	 */
4233 	if (pci_vendordata == NULL)
4234 		goto out;
4235 
4236 	/*
4237 	 * Scan the vendor data looking for this device
4238 	 */
4239 	line = pci_vendordata;
4240 	if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4241 		goto out;
4242 	for (;;) {
4243 		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
4244 			goto out;
4245 		if (vendor == pci_get_vendor(dev))
4246 			break;
4247 	}
4248 	if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
4249 		goto out;
4250 	for (;;) {
4251 		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
4252 			*dp = 0;
4253 			break;
4254 		}
4255 		if (vendor != -1) {
4256 			*dp = 0;
4257 			break;
4258 		}
4259 		if (device == pci_get_device(dev))
4260 			break;
4261 	}
4262 	if (dp[0] == '\0')
4263 		snprintf(dp, 80, "0x%x", pci_get_device(dev));
4264 	if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
4265 	    NULL)
4266 		sprintf(desc, "%s, %s", vp, dp);
4267 out:
4268 	if (vp != NULL)
4269 		free(vp, M_DEVBUF);
4270 	if (dp != NULL)
4271 		free(dp, M_DEVBUF);
4272 	return(desc);
4273 }
4274 
/*
 * Read an instance variable of a PCI child device.  All values are
 * served from the cached config-space copy in the devinfo rather than
 * from the hardware.  Returns 0 on success, EINVAL for the
 * unsupported PCI_IVAR_ETHADDR, or ENOENT for an unknown ivar.
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Combined device:vendor identifier. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_DOMAIN:
		*result = cfg->domain;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
4357 
/*
 * Write an instance variable of a PCI child device.  Only the
 * interrupt pin may be changed; the identity-related ivars are
 * read-only and return EINVAL, and anything unknown returns ENOENT.
 */
int
pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	switch (which) {
	case PCI_IVAR_INTPIN:
		dinfo->cfg.intpin = value;
		return (0);
	case PCI_IVAR_ETHADDR:
	case PCI_IVAR_SUBVENDOR:
	case PCI_IVAR_SUBDEVICE:
	case PCI_IVAR_VENDOR:
	case PCI_IVAR_DEVICE:
	case PCI_IVAR_DEVID:
	case PCI_IVAR_CLASS:
	case PCI_IVAR_SUBCLASS:
	case PCI_IVAR_PROGIF:
	case PCI_IVAR_REVID:
	case PCI_IVAR_IRQ:
	case PCI_IVAR_DOMAIN:
	case PCI_IVAR_BUS:
	case PCI_IVAR_SLOT:
	case PCI_IVAR_FUNCTION:
		return (EINVAL);	/* disallow for now */

	default:
		return (ENOENT);
	}
}
4390 
4391 #include "opt_ddb.h"
4392 #ifdef DDB
4393 #include <ddb/ddb.h>
4394 #include <sys/cons.h>
4395 
4396 /*
4397  * List resources based on pci map registers, used for within ddb
4398  */
4399 
/*
 * DDB "show pciregs" command: walk the global PCI device queue and
 * print one summary line per device (driver name/unit, bus address,
 * class, subsystem, vendor/device IDs, revision and header type).
 * Stops early if the pager is quit or the list is inconsistent with
 * pci_numdevs.
 */
DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		/* Devices with no attached driver print as "noneN". */
		db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
			"chip=0x%08x rev=0x%02x hdr=0x%02x\n",
			(name && *name) ? name : "none",
			(name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
			none_count++,
			p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
			p->pc_sel.pc_func, (p->pc_class << 16) |
			(p->pc_subclass << 8) | p->pc_progif,
			(p->pc_subdevice << 16) | p->pc_subvendor,
			(p->pc_device << 16) | p->pc_vendor,
			p->pc_revid, p->pc_hdr);
	}
}
4439 #endif /* DDB */
4440 
/*
 * Lazily size and reserve the BAR named by *rid on behalf of a child's
 * first allocation request.  The BAR is probed (or its previously
 * saved value reused), its decoded size and alignment override the
 * caller's request, and the chosen range is both recorded in the
 * child's resource list and written back to the BAR.  Returns the
 * reserved (inactive) resource, or NULL on any failure.
 */
static struct resource *
pci_reserve_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource *res;
	struct pci_map *pm;
	pci_addr_t map, testval;
	int mapsize;

	res = NULL;
	pm = pci_find_bar(child, *rid);
	if (pm != NULL) {
		/* This is a BAR that we failed to allocate earlier. */
		mapsize = pm->pm_size;
		map = pm->pm_value;
	} else {
		/*
		 * Weed out the bogons, and figure out how large the
		 * BAR/map is.  BARs that read back 0 here are bogus
		 * and unimplemented.  Note: atapci in legacy mode are
		 * special and handled elsewhere in the code.  If you
		 * have a atapci device in legacy mode and it fails
		 * here, that other code is broken.
		 */
		pci_read_bar(child, *rid, &map, &testval);

		/*
		 * Determine the size of the BAR and ignore BARs with a size
		 * of 0.  Device ROM BARs use a different mask value.
		 */
		if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
			mapsize = pci_romsize(testval);
		else
			mapsize = pci_mapsize(testval);
		if (mapsize == 0)
			goto out;
		pm = pci_add_bar(child, *rid, map, mapsize);
	}

	/* Reject requests whose type doesn't match what the BAR decodes. */
	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}

	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	count = (pci_addr_t)1 << mapsize;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate BAR for that resource.
	 */
	resource_list_add(rl, type, *rid, start, end, count);
	res = resource_list_reserve(rl, dev, child, type, rid, start, end,
	    count, flags & ~RF_ACTIVE);
	if (res == NULL) {
		resource_list_delete(rl, type, *rid);
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	map = rman_get_start(res);
	pci_write_bar(child, pm, map);
out:
	return (res);
}
4538 
/*
 * Allocate a resource for a child of the PCI bus.  Requests from
 * grandchildren are passed straight up the tree.  For direct children
 * this performs lazy allocation: legacy interrupts are routed on first
 * use (and refused once MSI/MSI-X is active), and I/O or memory BARs
 * are sized and reserved via pci_reserve_map() the first time they are
 * requested.  The final allocation always comes from the child's
 * resource list.
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
		   u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;
	struct resource *res;
	pcicfgregs *cfg;

	if (device_get_parent(child) != dev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));

	/*
	 * Perform lazy resource allocation
	 */
	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	cfg = &dinfo->cfg;
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_alloc_secbus(dev, child, rid, start, end, count,
		    flags));
#endif
	case SYS_RES_IRQ:
		/*
		 * Can't alloc legacy interrupt once MSI messages have
		 * been allocated.
		 */
		if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
		    cfg->msix.msix_alloc > 0))
			return (NULL);

		/*
		 * If the child device doesn't have an interrupt
		 * routed and is deserving of an interrupt, try to
		 * assign it one.
		 */
		if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
		    (cfg->intpin != 0))
			pci_assign_interrupt(dev, child, 0);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
#ifdef NEW_PCIB
		/*
		 * PCI-PCI bridge I/O window resources are not BARs.
		 * For those allocations just pass the request up the
		 * tree.
		 */
		if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
			switch (*rid) {
			case PCIR_IOBASEL_1:
			case PCIR_MEMBASE_1:
			case PCIR_PMBASEL_1:
				/*
				 * XXX: Should we bother creating a resource
				 * list entry?
				 */
				return (bus_generic_alloc_resource(dev, child,
				    type, rid, start, end, count, flags));
			}
		}
#endif
		/* Reserve resources for this BAR if needed. */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL) {
			res = pci_reserve_map(dev, child, type, rid, start, end,
			    count, flags);
			if (res == NULL)
				return (NULL);
		}
	}
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
4617 
/*
 * Release a resource previously allocated from this bus.  Requests
 * from grandchildren and PCI-PCI bridge window resources (which are
 * not BARs) are passed up the tree; everything else is released via
 * the child's resource list.
 */
int
pci_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	pcicfgregs *cfg;

	if (device_get_parent(child) != dev)
		return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
		    type, rid, r));

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;
#ifdef NEW_PCIB
	/*
	 * PCI-PCI bridge I/O window resources are not BARs.  For
	 * those allocations just pass the request up the tree.
	 */
	if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
	    (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
		switch (rid) {
		case PCIR_IOBASEL_1:
		case PCIR_MEMBASE_1:
		case PCIR_PMBASEL_1:
			return (bus_generic_release_resource(dev, child, type,
			    rid, r));
		}
	}
#endif

	rl = &dinfo->resources;
	return (resource_list_release(rl, dev, child, type, rid, r));
}
4652 
4653 int
4654 pci_activate_resource(device_t dev, device_t child, int type, int rid,
4655     struct resource *r)
4656 {
4657 	struct pci_devinfo *dinfo;
4658 	int error;
4659 
4660 	error = bus_generic_activate_resource(dev, child, type, rid, r);
4661 	if (error)
4662 		return (error);
4663 
4664 	/* Enable decoding in the command register when activating BARs. */
4665 	if (device_get_parent(child) == dev) {
4666 		/* Device ROMs need their decoding explicitly enabled. */
4667 		dinfo = device_get_ivars(child);
4668 		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4669 			pci_write_bar(child, pci_find_bar(child, rid),
4670 			    rman_get_start(r) | PCIM_BIOS_ENABLE);
4671 		switch (type) {
4672 		case SYS_RES_IOPORT:
4673 		case SYS_RES_MEMORY:
4674 			error = PCI_ENABLE_IO(dev, child, type);
4675 			break;
4676 		}
4677 	}
4678 	return (error);
4679 }
4680 
4681 int
4682 pci_deactivate_resource(device_t dev, device_t child, int type,
4683     int rid, struct resource *r)
4684 {
4685 	struct pci_devinfo *dinfo;
4686 	int error;
4687 
4688 	error = bus_generic_deactivate_resource(dev, child, type, rid, r);
4689 	if (error)
4690 		return (error);
4691 
4692 	/* Disable decoding for device ROMs. */
4693 	if (device_get_parent(child) == dev) {
4694 		dinfo = device_get_ivars(child);
4695 		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
4696 			pci_write_bar(child, pci_find_bar(child, rid),
4697 			    rman_get_start(r));
4698 	}
4699 	return (0);
4700 }
4701 
/*
 * Completely remove a child device from the bus: detach its driver,
 * turn off its port/memory decoding, release every resource in its
 * resource list (forcibly, with a complaint, if something is still
 * active), and finally delete the device and free its devinfo.
 */
void
pci_delete_child(device_t dev, device_t child)
{
	struct resource_list_entry *rle;
	struct resource_list *rl;
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	if (device_is_attached(child))
		device_detach(child);

	/* Turn off access to resources we're about to free */
	pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
	    PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);

	/* Free all allocated resources */
	STAILQ_FOREACH(rle, rl, link) {
		if (rle->res) {
			if (rman_get_flags(rle->res) & RF_ACTIVE ||
			    resource_list_busy(rl, rle->type, rle->rid)) {
				pci_printf(&dinfo->cfg,
				    "Resource still owned, oops. "
				    "(type=%d, rid=%d, addr=%lx)\n",
				    rle->type, rle->rid,
				    rman_get_start(rle->res));
				bus_release_resource(child, rle->type, rle->rid,
				    rle->res);
			}
			resource_list_unreserve(rl, dev, child, rle->type,
			    rle->rid);
		}
	}
	resource_list_free(rl);

	device_delete_child(dev, child);
	pci_freecfg(dinfo);
}
4741 
/*
 * Delete one entry from a child's resource list, unreserving the
 * underlying resource first.  Refuses (with a complaint) if the
 * resource is still active or busy; silently ignores grandchildren
 * and entries that don't exist.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle == NULL)
		return;

	if (rle->res) {
		if (rman_get_flags(rle->res) & RF_ACTIVE ||
		    resource_list_busy(rl, type, rid)) {
			device_printf(dev, "delete_resource: "
			    "Resource still owned by child, oops. "
			    "(type=%d, rid=%d, addr=%lx)\n",
			    type, rid, rman_get_start(rle->res));
			return;
		}
		resource_list_unreserve(rl, dev, child, type, rid);
	}
	resource_list_delete(rl, type, rid);
}
4771 
4772 struct resource_list *
4773 pci_get_resource_list (device_t dev, device_t child)
4774 {
4775 	struct pci_devinfo *dinfo = device_get_ivars(child);
4776 
4777 	return (&dinfo->resources);
4778 }
4779 
4780 bus_dma_tag_t
4781 pci_get_dma_tag(device_t bus, device_t dev)
4782 {
4783 	struct pci_softc *sc = device_get_softc(bus);
4784 
4785 	return (sc->sc_dma_tag);
4786 }
4787 
4788 uint32_t
4789 pci_read_config_method(device_t dev, device_t child, int reg, int width)
4790 {
4791 	struct pci_devinfo *dinfo = device_get_ivars(child);
4792 	pcicfgregs *cfg = &dinfo->cfg;
4793 
4794 	return (PCIB_READ_CONFIG(device_get_parent(dev),
4795 	    cfg->bus, cfg->slot, cfg->func, reg, width));
4796 }
4797 
4798 void
4799 pci_write_config_method(device_t dev, device_t child, int reg,
4800     uint32_t val, int width)
4801 {
4802 	struct pci_devinfo *dinfo = device_get_ivars(child);
4803 	pcicfgregs *cfg = &dinfo->cfg;
4804 
4805 	PCIB_WRITE_CONFIG(device_get_parent(dev),
4806 	    cfg->bus, cfg->slot, cfg->func, reg, val, width);
4807 }
4808 
4809 int
4810 pci_child_location_str_method(device_t dev, device_t child, char *buf,
4811     size_t buflen)
4812 {
4813 
4814 	snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
4815 	    pci_get_function(child));
4816 	return (0);
4817 }
4818 
4819 int
4820 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
4821     size_t buflen)
4822 {
4823 	struct pci_devinfo *dinfo;
4824 	pcicfgregs *cfg;
4825 
4826 	dinfo = device_get_ivars(child);
4827 	cfg = &dinfo->cfg;
4828 	snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
4829 	    "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
4830 	    cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
4831 	    cfg->progif);
4832 	return (0);
4833 }
4834 
4835 int
4836 pci_assign_interrupt_method(device_t dev, device_t child)
4837 {
4838 	struct pci_devinfo *dinfo = device_get_ivars(child);
4839 	pcicfgregs *cfg = &dinfo->cfg;
4840 
4841 	return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
4842 	    cfg->intpin));
4843 }
4844 
/*
 * Module event handler for the PCI bus.  On load, initialize the
 * global device queue, create the /dev/pci control device, and pull
 * in the vendor description database; on unload, destroy the control
 * device.  Other events are accepted without action.
 */
static int
pci_modevent(module_t mod, int what, void *arg)
{
	static struct cdev *pci_cdev;

	switch (what) {
	case MOD_LOAD:
		STAILQ_INIT(&pci_devq);
		pci_generation = 0;
		pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
		    "pci");
		pci_load_vendor_data();
		break;

	case MOD_UNLOAD:
		destroy_dev(pci_cdev);
		break;
	}

	return (0);
}
4866 
/*
 * Write back the PCI Express control registers saved by
 * pci_cfg_save_pcie().  Which registers exist depends on the
 * capability version and the port type, so the gating conditions here
 * must mirror those on the save side exactly.
 */
static void
pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
{
#define	WREG(n, v)	pci_write_config(dev, pos + (n), (v), 2)
	struct pcicfg_pcie *cfg;
	int version, pos;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;

	WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);

	/* Slot registers: root/downstream ports that implement a slot. */
	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	     (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);

	/* The "2" register block only exists in capability version 2+. */
	if (version > 1) {
		WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
		WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
		WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
	}
#undef WREG
}
4902 
4903 static void
4904 pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
4905 {
4906 	pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
4907 	    dinfo->cfg.pcix.pcix_command,  2);
4908 }
4909 
/*
 * Restore a device's configuration registers from the copy cached in
 * (dinfo), e.g. after resume or a power-state change; counterpart to
 * pci_cfg_save().
 */
void
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges,
	 * which we know need special treatment.  Type 2 devices are
	 * cardbus bridges which also require special treatment.
	 * Other types are unknown, and we err on the side of safety
	 * by ignoring them.
	 */
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
		return;

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_restore_bars(dev);
	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);

	/*
	 * Restore extended capabilities for PCI-Express and PCI-X
	 */
	if (dinfo->cfg.pcie.pcie_location != 0)
		pci_cfg_restore_pcie(dev, dinfo);
	if (dinfo->cfg.pcix.pcix_location != 0)
		pci_cfg_restore_pcix(dev, dinfo);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);
}
4959 
/*
 * Save the PCI Express control registers into (dinfo) so they can be
 * written back by pci_cfg_restore_pcie().  Which registers exist
 * depends on the capability version and the port type; the restore
 * side must use the same gating conditions.
 */
static void
pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
{
#define	RREG(n)	pci_read_config(dev, pos + (n), 2)
	struct pcicfg_pcie *cfg;
	int version, pos;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	cfg->pcie_flags = RREG(PCIER_FLAGS);

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;

	cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);

	/* Slot registers: root/downstream ports that implement a slot. */
	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	     (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);

	/* The "2" register block only exists in capability version 2+. */
	if (version > 1) {
		cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
		cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
		cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
	}
#undef RREG
}
4997 
4998 static void
4999 pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
5000 {
5001 	dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
5002 	    dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
5003 }
5004 
5005 void
5006 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
5007 {
5008 	uint32_t cls;
5009 	int ps;
5010 
5011 	/*
5012 	 * Only do header type 0 devices.  Type 1 devices are bridges, which
5013 	 * we know need special treatment.  Type 2 devices are cardbus bridges
5014 	 * which also require special treatment.  Other types are unknown, and
5015 	 * we err on the side of safety by ignoring them.  Powering down
5016 	 * bridges should not be undertaken lightly.
5017 	 */
5018 	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
5019 		return;
5020 
5021 	/*
5022 	 * Some drivers apparently write to these registers w/o updating our
5023 	 * cached copy.  No harm happens if we update the copy, so do so here
5024 	 * so we can restore them.  The COMMAND register is modified by the
5025 	 * bus w/o updating the cache.  This should represent the normally
5026 	 * writable portion of the 'defined' part of type 0 headers.  In
5027 	 * theory we also need to save/restore the PCI capability structures
5028 	 * we know about, but apart from power we don't know any that are
5029 	 * writable.
5030 	 */
5031 	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
5032 	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
5033 	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
5034 	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
5035 	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
5036 	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
5037 	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
5038 	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
5039 	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
5040 	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
5041 	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
5042 	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
5043 	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
5044 	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
5045 	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
5046 
5047 	if (dinfo->cfg.pcie.pcie_location != 0)
5048 		pci_cfg_save_pcie(dev, dinfo);
5049 
5050 	if (dinfo->cfg.pcix.pcix_location != 0)
5051 		pci_cfg_save_pcix(dev, dinfo);
5052 
5053 	/*
5054 	 * don't set the state for display devices, base peripherals and
5055 	 * memory devices since bad things happen when they are powered down.
5056 	 * We should (a) have drivers that can easily detach and (b) use
5057 	 * generic drivers for these devices so that some device actually
5058 	 * attaches.  We need to make sure that when we implement (a) we don't
5059 	 * power the device down on a reattach.
5060 	 */
5061 	cls = pci_get_class(dev);
5062 	if (!setstate)
5063 		return;
5064 	switch (pci_do_power_nodriver)
5065 	{
5066 		case 0:		/* NO powerdown at all */
5067 			return;
5068 		case 1:		/* Conservative about what to power down */
5069 			if (cls == PCIC_STORAGE)
5070 				return;
5071 			/*FALLTHROUGH*/
5072 		case 2:		/* Agressive about what to power down */
5073 			if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
5074 			    cls == PCIC_BASEPERIPH)
5075 				return;
5076 			/*FALLTHROUGH*/
5077 		case 3:		/* Power down everything */
5078 			break;
5079 	}
5080 	/*
5081 	 * PCI spec says we can only go into D3 state from D0 state.
5082 	 * Transition from D[12] into D0 before going to D3 state.
5083 	 */
5084 	ps = pci_get_powerstate(dev);
5085 	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
5086 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
5087 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
5088 		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
5089 }
5090 
5091 /* Wrapper APIs suitable for device driver use. */
5092 void
5093 pci_save_state(device_t dev)
5094 {
5095 	struct pci_devinfo *dinfo;
5096 
5097 	dinfo = device_get_ivars(dev);
5098 	pci_cfg_save(dev, dinfo, 0);
5099 }
5100 
5101 void
5102 pci_restore_state(device_t dev)
5103 {
5104 	struct pci_devinfo *dinfo;
5105 
5106 	dinfo = device_get_ivars(dev);
5107 	pci_cfg_restore(dev, dinfo);
5108 }
5109 
5110 static uint16_t
5111 pci_get_rid_method(device_t dev, device_t child)
5112 {
5113 
5114 	return (PCIB_GET_RID(device_get_parent(dev), child));
5115 }
5116