xref: /freebsd/sys/dev/pci/pci_pci.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier
5  * Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
6  * Copyright (c) 2000 BSDi
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 /*
35  * PCI:PCI bridge support.
36  */
37 
38 #include "opt_pci.h"
39 
40 #include <sys/param.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/mutex.h>
47 #include <sys/pciio.h>
48 #include <sys/rman.h>
49 #include <sys/sysctl.h>
50 #include <sys/systm.h>
51 #include <sys/taskqueue.h>
52 
53 #include <dev/pci/pcivar.h>
54 #include <dev/pci/pcireg.h>
55 #include <dev/pci/pci_private.h>
56 #include <dev/pci/pcib_private.h>
57 
58 #include "pcib_if.h"
59 
60 static int		pcib_probe(device_t dev);
61 static int		pcib_suspend(device_t dev);
62 static int		pcib_resume(device_t dev);
63 static int		pcib_power_for_sleep(device_t pcib, device_t dev,
64 			    int *pstate);
65 static int		pcib_ari_get_id(device_t pcib, device_t dev,
66     enum pci_id_type type, uintptr_t *id);
67 static uint32_t		pcib_read_config(device_t dev, u_int b, u_int s,
68     u_int f, u_int reg, int width);
69 static void		pcib_write_config(device_t dev, u_int b, u_int s,
70     u_int f, u_int reg, uint32_t val, int width);
71 static int		pcib_ari_maxslots(device_t dev);
72 static int		pcib_ari_maxfuncs(device_t dev);
73 static int		pcib_try_enable_ari(device_t pcib, device_t dev);
74 static int		pcib_ari_enabled(device_t pcib);
75 static void		pcib_ari_decode_rid(device_t pcib, uint16_t rid,
76 			    int *bus, int *slot, int *func);
77 #ifdef PCI_HP
78 static void		pcib_pcie_ab_timeout(void *arg, int pending);
79 static void		pcib_pcie_cc_timeout(void *arg, int pending);
80 static void		pcib_pcie_dll_timeout(void *arg, int pending);
81 #endif
82 static int		pcib_request_feature_default(device_t pcib, device_t dev,
83 			    enum pci_feature feature);
84 static int		pcib_reset_child(device_t dev, device_t child, int flags);
85 
86 static device_method_t pcib_methods[] = {
87     /* Device interface */
88     DEVMETHOD(device_probe,		pcib_probe),
89     DEVMETHOD(device_attach,		pcib_attach),
90     DEVMETHOD(device_detach,		pcib_detach),
91     DEVMETHOD(device_shutdown,		bus_generic_shutdown),
92     DEVMETHOD(device_suspend,		pcib_suspend),
93     DEVMETHOD(device_resume,		pcib_resume),
94 
95     /* Bus interface */
96     DEVMETHOD(bus_child_present,	pcib_child_present),
97     DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
98     DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
99     DEVMETHOD(bus_alloc_resource,	pcib_alloc_resource),
100 #ifdef NEW_PCIB
101     DEVMETHOD(bus_adjust_resource,	pcib_adjust_resource),
102     DEVMETHOD(bus_release_resource,	pcib_release_resource),
103 #else
104     DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
105     DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
106 #endif
107     DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
108     DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
109     DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
110     DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
111     DEVMETHOD(bus_reset_child,		pcib_reset_child),
112 
113     /* pcib interface */
114     DEVMETHOD(pcib_maxslots,		pcib_ari_maxslots),
115     DEVMETHOD(pcib_maxfuncs,		pcib_ari_maxfuncs),
116     DEVMETHOD(pcib_read_config,		pcib_read_config),
117     DEVMETHOD(pcib_write_config,	pcib_write_config),
118     DEVMETHOD(pcib_route_interrupt,	pcib_route_interrupt),
119     DEVMETHOD(pcib_alloc_msi,		pcib_alloc_msi),
120     DEVMETHOD(pcib_release_msi,		pcib_release_msi),
121     DEVMETHOD(pcib_alloc_msix,		pcib_alloc_msix),
122     DEVMETHOD(pcib_release_msix,	pcib_release_msix),
123     DEVMETHOD(pcib_map_msi,		pcib_map_msi),
124     DEVMETHOD(pcib_power_for_sleep,	pcib_power_for_sleep),
125     DEVMETHOD(pcib_get_id,		pcib_ari_get_id),
126     DEVMETHOD(pcib_try_enable_ari,	pcib_try_enable_ari),
127     DEVMETHOD(pcib_ari_enabled,		pcib_ari_enabled),
128     DEVMETHOD(pcib_decode_rid,		pcib_ari_decode_rid),
129     DEVMETHOD(pcib_request_feature,	pcib_request_feature_default),
130 
131     DEVMETHOD_END
132 };
133 
134 DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc));
135 EARLY_DRIVER_MODULE(pcib, pci, pcib_driver, NULL, NULL, BUS_PASS_BUS);
136 
137 #if defined(NEW_PCIB) || defined(PCI_HP)
138 SYSCTL_DECL(_hw_pci);
139 #endif
140 
141 #ifdef NEW_PCIB
142 static int pci_clear_pcib;
143 SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0,
144     "Clear firmware-assigned resources for PCI-PCI bridge I/O windows.");
145 
146 /*
147  * Return the window from which a child device's resource was
148  * sub-allocated, if it came from one of our window resource managers.
149  */
150 static struct pcib_window *
151 pcib_get_resource_window(struct pcib_softc *sc, int type, struct resource *r)
152 {
153 	switch (type) {
154 	case SYS_RES_IOPORT:
155 		if (rman_is_region_manager(r, &sc->io.rman))
156 			return (&sc->io);
157 		break;
158 	case SYS_RES_MEMORY:
159 		/* Prefetchable resources may live in either memory rman. */
160 		if (rman_get_flags(r) & RF_PREFETCHABLE &&
161 		    rman_is_region_manager(r, &sc->pmem.rman))
162 			return (&sc->pmem);
163 		if (rman_is_region_manager(r, &sc->mem.rman))
164 			return (&sc->mem);
165 		break;
166 	}
167 	return (NULL);
168 }
169 
170 /*
171  * Is a resource from a child device sub-allocated from one of our
172  * resource managers?
173  */
174 static int
175 pcib_is_resource_managed(struct pcib_softc *sc, int type, struct resource *r)
176 {
177 
178 #ifdef PCI_RES_BUS
179 	if (type == PCI_RES_BUS)
180 		return (rman_is_region_manager(r, &sc->bus.rman));
181 #endif
182 	return (pcib_get_resource_window(sc, type, r) != NULL);
183 }
184 
185 static int
186 pcib_is_window_open(struct pcib_window *pw)
187 {
188 
189 	return (pw->valid && pw->base < pw->limit);
190 }
191 
192 /*
193  * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and
194  * handle for the resource, we could pass RF_ACTIVE up to the PCI bus
195  * when allocating the resource windows and rely on the PCI bus driver
196  * to do this for us.
197  */
198 static void
199 pcib_activate_window(struct pcib_softc *sc, int type)
200 {
201 
202 	PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type);
203 }
204 
205 static void
206 pcib_write_windows(struct pcib_softc *sc, int mask)
207 {
208 	device_t dev;
209 	uint32_t val;
210 
211 	dev = sc->dev;
212 	if (sc->io.valid && mask & WIN_IO) {
213 		val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
214 		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
215 			pci_write_config(dev, PCIR_IOBASEH_1,
216 			    sc->io.base >> 16, 2);
217 			pci_write_config(dev, PCIR_IOLIMITH_1,
218 			    sc->io.limit >> 16, 2);
219 		}
220 		pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1);
221 		pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1);
222 	}
223 
224 	if (mask & WIN_MEM) {
225 		pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2);
226 		pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2);
227 	}
228 
229 	if (sc->pmem.valid && mask & WIN_PMEM) {
230 		val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
231 		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
232 			pci_write_config(dev, PCIR_PMBASEH_1,
233 			    sc->pmem.base >> 32, 4);
234 			pci_write_config(dev, PCIR_PMLIMITH_1,
235 			    sc->pmem.limit >> 32, 4);
236 		}
237 		pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2);
238 		pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2);
239 	}
240 }
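
/*
 * Illustrative example for pcib_write_windows() above (added for
 * clarity, not in the original source): with sc->io.base = 0x4000 and
 * sc->io.limit = 0x4fff, the I/O Base and I/O Limit registers are
 * written with 0x40 and 0x4f; the low 12 bits of the window are
 * implied (0x000 for the base, 0xfff for the limit) since the I/O
 * window has 4KB granularity.  The memory windows work the same way
 * with 1MB granularity, so only address bits 31:20 are programmed.
 */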
241 
242 /*
243  * This is used to reject I/O port allocations that conflict with an
244  * ISA alias range.
245  */
246 static int
247 pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end,
248     rman_res_t count)
249 {
250 	rman_res_t next_alias;
251 
252 	if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE))
253 		return (0);
254 
255 	/* Only check fixed ranges for overlap. */
256 	if (start + count - 1 != end)
257 		return (0);
258 
259 	/* ISA aliases are only in the lower 64KB of I/O space. */
260 	if (start >= 65536)
261 		return (0);
262 
263 	/* Check for overlap with 0x000 - 0x0ff as a special case. */
264 	if (start < 0x100)
265 		goto alias;
266 
267 	/*
268 	 * If the start address is an alias, the range is an alias.
269 	 * Otherwise, compute the start of the next alias range and
270 	 * check if it is before the end of the candidate range.
271 	 */
272 	if ((start & 0x300) != 0)
273 		goto alias;
274 	next_alias = (start & ~0x3fful) | 0x100;
275 	if (next_alias <= end)
276 		goto alias;
277 	return (0);
278 
279 alias:
280 	if (bootverbose)
281 		device_printf(sc->dev,
282 		    "I/O range %#jx-%#jx overlaps with an ISA alias\n", start,
283 		    end);
284 	return (1);
285 }
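
/*
 * Illustrative example for pcib_is_isa_range() above (added for
 * clarity, not in the original source): with ISA enable set, only the
 * first 256 ports of each 1KB block below 64KB are forwarded, so a
 * fixed request such as 0x3e8-0x3ef (bits 8-9 set) falls in an alias
 * and is rejected, while 0x1c00-0x1cff does not overlap any alias and
 * is allowed.
 */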
286 
287 static void
288 pcib_add_window_resources(struct pcib_window *w, struct resource **res,
289     int count)
290 {
291 	struct resource **newarray;
292 	int error, i;
293 
294 	newarray = malloc(sizeof(struct resource *) * (w->count + count),
295 	    M_DEVBUF, M_WAITOK);
296 	if (w->res != NULL)
297 		bcopy(w->res, newarray, sizeof(struct resource *) * w->count);
298 	bcopy(res, newarray + w->count, sizeof(struct resource *) * count);
299 	free(w->res, M_DEVBUF);
300 	w->res = newarray;
301 	w->count += count;
302 
303 	for (i = 0; i < count; i++) {
304 		error = rman_manage_region(&w->rman, rman_get_start(res[i]),
305 		    rman_get_end(res[i]));
306 		if (error)
307 			panic("Failed to add resource to rman");
308 	}
309 }
310 
311 typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg);
312 
313 static void
314 pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb,
315     void *arg)
316 {
317 	rman_res_t next_end;
318 
319 	/*
320 	 * If start is within an ISA alias range, move up to the start
321 	 * of the next non-alias range.  As a special case, addresses
322 	 * in the range 0x000 - 0x0ff should also be skipped since
323 	 * those are used for various system I/O devices in ISA
324 	 * systems.
325 	 */
326 	if (start <= 65535) {
327 		if (start < 0x100 || (start & 0x300) != 0) {
328 			start &= ~0x3ff;
329 			start += 0x400;
330 		}
331 	}
332 
333 	/* ISA aliases are only in the lower 64KB of I/O space. */
334 	while (start <= MIN(end, 65535)) {
335 		next_end = MIN(start | 0xff, end);
336 		cb(start, next_end, arg);
337 		start += 0x400;
338 	}
339 
340 	if (start <= end)
341 		cb(start, end, arg);
342 }
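
/*
 * Illustrative example for pcib_walk_nonisa_ranges() above (added for
 * clarity, not in the original source): walking 0x0-0xfff invokes the
 * callback for 0x400-0x4ff, 0x800-0x8ff and 0xc00-0xcff only, i.e. the
 * first 256 ports of each 1KB block, skipping 0x0-0xff and all ISA
 * alias ranges.
 */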
343 
344 static void
345 count_ranges(rman_res_t start, rman_res_t end, void *arg)
346 {
347 	int *countp;
348 
349 	countp = arg;
350 	(*countp)++;
351 }
352 
353 struct alloc_state {
354 	struct resource **res;
355 	struct pcib_softc *sc;
356 	int count, error;
357 };
358 
359 static void
360 alloc_ranges(rman_res_t start, rman_res_t end, void *arg)
361 {
362 	struct alloc_state *as;
363 	struct pcib_window *w;
364 	int rid;
365 
366 	as = arg;
367 	if (as->error != 0)
368 		return;
369 
370 	w = &as->sc->io;
371 	rid = w->reg;
372 	if (bootverbose)
373 		device_printf(as->sc->dev,
374 		    "allocating non-ISA range %#jx-%#jx\n", start, end);
375 	as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT,
376 	    &rid, start, end, end - start + 1, 0);
377 	if (as->res[as->count] == NULL)
378 		as->error = ENXIO;
379 	else
380 		as->count++;
381 }
382 
383 static int
384 pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end)
385 {
386 	struct alloc_state as;
387 	int i, new_count;
388 
389 	/* First, see how many ranges we need. */
390 	new_count = 0;
391 	pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count);
392 
393 	/* Second, allocate the ranges. */
394 	as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF,
395 	    M_WAITOK);
396 	as.sc = sc;
397 	as.count = 0;
398 	as.error = 0;
399 	pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as);
400 	if (as.error != 0) {
401 		for (i = 0; i < as.count; i++)
402 			bus_release_resource(sc->dev, SYS_RES_IOPORT,
403 			    sc->io.reg, as.res[i]);
404 		free(as.res, M_DEVBUF);
405 		return (as.error);
406 	}
407 	KASSERT(as.count == new_count, ("%s: count mismatch", __func__));
408 
409 	/* Third, add the ranges to the window. */
410 	pcib_add_window_resources(&sc->io, as.res, as.count);
411 	free(as.res, M_DEVBUF);
412 	return (0);
413 }
414 
415 static void
416 pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type,
417     int flags, pci_addr_t max_address)
418 {
419 	struct resource *res;
420 	char buf[64];
421 	int error, rid;
422 
423 	if (max_address != (rman_res_t)max_address)
424 		max_address = ~0;
425 	w->rman.rm_start = 0;
426 	w->rman.rm_end = max_address;
427 	w->rman.rm_type = RMAN_ARRAY;
428 	snprintf(buf, sizeof(buf), "%s %s window",
429 	    device_get_nameunit(sc->dev), w->name);
430 	w->rman.rm_descr = strdup(buf, M_DEVBUF);
431 	error = rman_init(&w->rman);
432 	if (error)
433 		panic("Failed to initialize %s %s rman",
434 		    device_get_nameunit(sc->dev), w->name);
435 
436 	if (!pcib_is_window_open(w))
437 		return;
438 
439 	if (w->base > max_address || w->limit > max_address) {
440 		device_printf(sc->dev,
441 		    "initial %s window has too many bits, ignoring\n", w->name);
442 		return;
443 	}
444 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE)
445 		(void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit);
446 	else {
447 		rid = w->reg;
448 		res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit,
449 		    w->limit - w->base + 1, flags);
450 		if (res != NULL)
451 			pcib_add_window_resources(w, &res, 1);
452 	}
453 	if (w->res == NULL) {
454 		device_printf(sc->dev,
455 		    "failed to allocate initial %s window: %#jx-%#jx\n",
456 		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
457 		w->base = max_address;
458 		w->limit = 0;
459 		pcib_write_windows(sc, w->mask);
460 		return;
461 	}
462 	pcib_activate_window(sc, type);
463 }
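
/*
 * Note (added for clarity, not in the original source): if the initial
 * window cannot be allocated from the parent above, the window is
 * reprogrammed with base > limit, which pcib_is_window_open() treats
 * as closed and which disables decoding for that window per the
 * PCI-to-PCI bridge specification.
 */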
464 
465 /*
466  * Initialize I/O windows.
467  */
468 static void
469 pcib_probe_windows(struct pcib_softc *sc)
470 {
471 	pci_addr_t max;
472 	device_t dev;
473 	uint32_t val;
474 
475 	dev = sc->dev;
476 
477 	if (pci_clear_pcib) {
478 		pcib_bridge_init(dev);
479 	}
480 
481 	/* Determine if the I/O port window is implemented. */
482 	val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
483 	if (val == 0) {
484 		/*
485 		 * If 'val' is zero, then only 16-bits of I/O space
486 		 * are supported.
487 		 */
488 		pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
489 		if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) {
490 			sc->io.valid = 1;
491 			pci_write_config(dev, PCIR_IOBASEL_1, 0, 1);
492 		}
493 	} else
494 		sc->io.valid = 1;
495 
496 	/* Read the existing I/O port window. */
497 	if (sc->io.valid) {
498 		sc->io.reg = PCIR_IOBASEL_1;
499 		sc->io.step = 12;
500 		sc->io.mask = WIN_IO;
501 		sc->io.name = "I/O port";
502 		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
503 			sc->io.base = PCI_PPBIOBASE(
504 			    pci_read_config(dev, PCIR_IOBASEH_1, 2), val);
505 			sc->io.limit = PCI_PPBIOLIMIT(
506 			    pci_read_config(dev, PCIR_IOLIMITH_1, 2),
507 			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
508 			max = 0xffffffff;
509 		} else {
510 			sc->io.base = PCI_PPBIOBASE(0, val);
511 			sc->io.limit = PCI_PPBIOLIMIT(0,
512 			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
513 			max = 0xffff;
514 		}
515 		pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max);
516 	}
517 
518 	/* Read the existing memory window. */
519 	sc->mem.valid = 1;
520 	sc->mem.reg = PCIR_MEMBASE_1;
521 	sc->mem.step = 20;
522 	sc->mem.mask = WIN_MEM;
523 	sc->mem.name = "memory";
524 	sc->mem.base = PCI_PPBMEMBASE(0,
525 	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
526 	sc->mem.limit = PCI_PPBMEMLIMIT(0,
527 	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
528 	pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff);
529 
530 	/* Determine if the prefetchable memory window is implemented. */
531 	val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
532 	if (val == 0) {
533 		/*
534 		 * If 'val' is zero, then only 32-bits of memory space
535 		 * are supported.
536 		 */
537 		pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
538 		if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) {
539 			sc->pmem.valid = 1;
540 			pci_write_config(dev, PCIR_PMBASEL_1, 0, 2);
541 		}
542 	} else
543 		sc->pmem.valid = 1;
544 
545 	/* Read the existing prefetchable memory window. */
546 	if (sc->pmem.valid) {
547 		sc->pmem.reg = PCIR_PMBASEL_1;
548 		sc->pmem.step = 20;
549 		sc->pmem.mask = WIN_PMEM;
550 		sc->pmem.name = "prefetch";
551 		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
552 			sc->pmem.base = PCI_PPBMEMBASE(
553 			    pci_read_config(dev, PCIR_PMBASEH_1, 4), val);
554 			sc->pmem.limit = PCI_PPBMEMLIMIT(
555 			    pci_read_config(dev, PCIR_PMLIMITH_1, 4),
556 			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
557 			max = 0xffffffffffffffff;
558 		} else {
559 			sc->pmem.base = PCI_PPBMEMBASE(0, val);
560 			sc->pmem.limit = PCI_PPBMEMLIMIT(0,
561 			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
562 			max = 0xffffffff;
563 		}
564 		pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY,
565 		    RF_PREFETCHABLE, max);
566 	}
567 }
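
/*
 * Illustrative example for pcib_probe_windows() above (added for
 * clarity; the register values are hypothetical): if the firmware left
 * IOBASEL_1 = 0x41 and IOBASEH_1 = 0x0002, the low nibble (0x1)
 * indicates 32-bit I/O decoding and the window base is 0x24000; with
 * IOLIMITL_1 = 0x4f and IOLIMITH_1 = 0x0002 the limit is 0x24fff.  A
 * window whose base register still reads zero after writing 0xff (or
 * 0xffff) is treated as not implemented.
 */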
568 
569 static void
570 pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type)
571 {
572 	device_t dev;
573 	int error, i;
574 
575 	if (!w->valid)
576 		return;
577 
578 	dev = sc->dev;
579 	error = rman_fini(&w->rman);
580 	if (error) {
581 		device_printf(dev, "failed to release %s rman\n", w->name);
582 		return;
583 	}
584 	free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF);
585 
586 	for (i = 0; i < w->count; i++) {
587 		error = bus_free_resource(dev, type, w->res[i]);
588 		if (error)
589 			device_printf(dev,
590 			    "failed to release %s resource: %d\n", w->name,
591 			    error);
592 	}
593 	free(w->res, M_DEVBUF);
594 }
595 
596 static void
597 pcib_free_windows(struct pcib_softc *sc)
598 {
599 
600 	pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY);
601 	pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY);
602 	pcib_release_window(sc, &sc->io, SYS_RES_IOPORT);
603 }
604 
605 #ifdef PCI_RES_BUS
606 /*
607  * Allocate a suitable secondary bus for this bridge if needed and
608  * initialize the resource manager for the secondary bus range.  Note
609  * that the minimum count is a desired value and this may allocate a
610  * smaller range.
611  */
612 void
613 pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count)
614 {
615 	char buf[64];
616 	int error, rid, sec_reg;
617 
618 	switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) {
619 	case PCIM_HDRTYPE_BRIDGE:
620 		sec_reg = PCIR_SECBUS_1;
621 		bus->sub_reg = PCIR_SUBBUS_1;
622 		break;
623 	case PCIM_HDRTYPE_CARDBUS:
624 		sec_reg = PCIR_SECBUS_2;
625 		bus->sub_reg = PCIR_SUBBUS_2;
626 		break;
627 	default:
628 		panic("not a PCI bridge");
629 	}
630 	bus->sec = pci_read_config(dev, sec_reg, 1);
631 	bus->sub = pci_read_config(dev, bus->sub_reg, 1);
632 	bus->dev = dev;
633 	bus->rman.rm_start = 0;
634 	bus->rman.rm_end = PCI_BUSMAX;
635 	bus->rman.rm_type = RMAN_ARRAY;
636 	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
637 	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
638 	error = rman_init(&bus->rman);
639 	if (error)
640 		panic("Failed to initialize %s bus number rman",
641 		    device_get_nameunit(dev));
642 
643 	/*
644 	 * Allocate a bus range.  This will return an existing bus range
645 	 * if one exists, or a new bus range if one does not.
646 	 */
647 	rid = 0;
648 	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
649 	    min_count, 0);
650 	if (bus->res == NULL) {
651 		/*
652 		 * Fall back to just allocating a range of a single bus
653 		 * number.
654 		 */
655 		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
656 		    1, 0);
657 	} else if (rman_get_size(bus->res) < min_count)
658 		/*
659 		 * Attempt to grow the existing range to satisfy the
660 		 * minimum desired count.
661 		 */
662 		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
663 		    rman_get_start(bus->res), rman_get_start(bus->res) +
664 		    min_count - 1);
665 
666 	/*
667 	 * Add the initial resource to the rman.
668 	 */
669 	if (bus->res != NULL) {
670 		error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
671 		    rman_get_end(bus->res));
672 		if (error)
673 			panic("Failed to add resource to rman");
674 		bus->sec = rman_get_start(bus->res);
675 		bus->sub = rman_get_end(bus->res);
676 	}
677 }
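
/*
 * Illustrative example for pcib_setup_secbus() above (added for
 * clarity, not in the original source): for a bridge whose firmware
 * programmed secondary bus 2 and subordinate bus 4, the parent
 * typically returns the existing range and the rman then manages bus
 * numbers 2-4, from which child bridges sub-allocate their own
 * secondary buses.
 */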
678 
679 void
680 pcib_free_secbus(device_t dev, struct pcib_secbus *bus)
681 {
682 	int error;
683 
684 	error = rman_fini(&bus->rman);
685 	if (error) {
686 		device_printf(dev, "failed to release bus number rman\n");
687 		return;
688 	}
689 	free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF);
690 
691 	error = bus_free_resource(dev, PCI_RES_BUS, bus->res);
692 	if (error)
693 		device_printf(dev,
694 		    "failed to release bus numbers resource: %d\n", error);
695 }
696 
697 static struct resource *
698 pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid,
699     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
700 {
701 	struct resource *res;
702 
703 	res = rman_reserve_resource(&bus->rman, start, end, count, flags,
704 	    child);
705 	if (res == NULL)
706 		return (NULL);
707 
708 	if (bootverbose)
709 		device_printf(bus->dev,
710 		    "allocated bus range (%ju-%ju) for rid %d of %s\n",
711 		    rman_get_start(res), rman_get_end(res), *rid,
712 		    pcib_child_name(child));
713 	rman_set_rid(res, *rid);
714 	return (res);
715 }
716 
717 /*
718  * Attempt to grow the secondary bus range.  This is much simpler than
719  * for I/O windows as the range can only be grown by increasing
720  * subbus.
721  */
722 static int
723 pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end)
724 {
725 	rman_res_t old_end;
726 	int error;
727 
728 	old_end = rman_get_end(bus->res);
729 	KASSERT(new_end > old_end, ("attempt to shrink subbus"));
730 	error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res,
731 	    rman_get_start(bus->res), new_end);
732 	if (error)
733 		return (error);
734 	if (bootverbose)
735 		device_printf(bus->dev, "grew bus range to %ju-%ju\n",
736 		    rman_get_start(bus->res), rman_get_end(bus->res));
737 	error = rman_manage_region(&bus->rman, old_end + 1,
738 	    rman_get_end(bus->res));
739 	if (error)
740 		panic("Failed to add resource to rman");
741 	bus->sub = rman_get_end(bus->res);
742 	pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1);
743 	return (0);
744 }
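
/*
 * Illustrative example for pcib_grow_subbus() above (added for
 * clarity, not in the original source): growing a 2-4 bus range to
 * new_end = 6 adjusts the underlying resource, adds bus numbers 5-6 to
 * the rman and writes 6 into the subordinate bus register so that
 * configuration cycles for the new buses are forwarded downstream.
 */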
745 
746 struct resource *
747 pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
748     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
749 {
750 	struct resource *res;
751 	rman_res_t start_free, end_free, new_end;
752 
753 	/*
754 	 * First, see if the request can be satisfied by the existing
755 	 * bus range.
756 	 */
757 	res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags);
758 	if (res != NULL)
759 		return (res);
760 
761 	/*
762 	 * Figure out a range to grow the bus range.  First, find the
763 	 * first bus number after the last allocated bus in the rman and
764 	 * enforce that as a minimum starting point for the range.
765 	 */
766 	if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 ||
767 	    end_free != bus->sub)
768 		start_free = bus->sub + 1;
769 	if (start_free < start)
770 		start_free = start;
771 	new_end = start_free + count - 1;
772 
773 	/*
774 	 * See if this new range would satisfy the request if it
775 	 * succeeds.
776 	 */
777 	if (new_end > end)
778 		return (NULL);
779 
780 	/* Finally, attempt to grow the existing resource. */
781 	if (bootverbose) {
782 		device_printf(bus->dev,
783 		    "attempting to grow bus range for %ju buses\n", count);
784 		printf("\tback candidate range: %ju-%ju\n", start_free,
785 		    new_end);
786 	}
787 	if (pcib_grow_subbus(bus, new_end) == 0)
788 		return (pcib_suballoc_bus(bus, child, rid, start, end, count,
789 		    flags));
790 	return (NULL);
791 }
792 #endif
793 
794 #else
795 
796 /*
797  * Is the prefetch window open (i.e., can we allocate memory in it?)
798  */
799 static int
800 pcib_is_prefetch_open(struct pcib_softc *sc)
801 {
802 	return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
803 }
804 
805 /*
806  * Is the nonprefetch window open (i.e., can we allocate memory in it?)
807  */
808 static int
809 pcib_is_nonprefetch_open(struct pcib_softc *sc)
810 {
811 	return (sc->membase > 0 && sc->membase < sc->memlimit);
812 }
813 
814 /*
815  * Is the I/O window open (i.e., can we allocate ports in it?)
816  */
817 static int
818 pcib_is_io_open(struct pcib_softc *sc)
819 {
820 	return (sc->iobase > 0 && sc->iobase < sc->iolimit);
821 }
822 
823 /*
824  * Get current I/O decode.
825  */
826 static void
827 pcib_get_io_decode(struct pcib_softc *sc)
828 {
829 	device_t	dev;
830 	uint32_t	iolow;
831 
832 	dev = sc->dev;
833 
834 	iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
835 	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
836 		sc->iobase = PCI_PPBIOBASE(
837 		    pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
838 	else
839 		sc->iobase = PCI_PPBIOBASE(0, iolow);
840 
841 	iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
842 	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
843 		sc->iolimit = PCI_PPBIOLIMIT(
844 		    pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
845 	else
846 		sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
847 }
848 
849 /*
850  * Get current memory decode.
851  */
852 static void
853 pcib_get_mem_decode(struct pcib_softc *sc)
854 {
855 	device_t	dev;
856 	pci_addr_t	pmemlow;
857 
858 	dev = sc->dev;
859 
860 	sc->membase = PCI_PPBMEMBASE(0,
861 	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
862 	sc->memlimit = PCI_PPBMEMLIMIT(0,
863 	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
864 
865 	pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2);
866 	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
867 		sc->pmembase = PCI_PPBMEMBASE(
868 		    pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow);
869 	else
870 		sc->pmembase = PCI_PPBMEMBASE(0, pmemlow);
871 
872 	pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2);
873 	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
874 		sc->pmemlimit = PCI_PPBMEMLIMIT(
875 		    pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow);
876 	else
877 		sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow);
878 }
879 
880 /*
881  * Restore previous I/O decode.
882  */
883 static void
884 pcib_set_io_decode(struct pcib_softc *sc)
885 {
886 	device_t	dev;
887 	uint32_t	iohi;
888 
889 	dev = sc->dev;
890 
891 	iohi = sc->iobase >> 16;
892 	if (iohi > 0)
893 		pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2);
894 	pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1);
895 
896 	iohi = sc->iolimit >> 16;
897 	if (iohi > 0)
898 		pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2);
899 	pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1);
900 }
901 
902 /*
903  * Restore previous memory decode.
904  */
905 static void
906 pcib_set_mem_decode(struct pcib_softc *sc)
907 {
908 	device_t	dev;
909 	pci_addr_t	pmemhi;
910 
911 	dev = sc->dev;
912 
913 	pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2);
914 	pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2);
915 
916 	pmemhi = sc->pmembase >> 32;
917 	if (pmemhi > 0)
918 		pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4);
919 	pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2);
920 
921 	pmemhi = sc->pmemlimit >> 32;
922 	if (pmemhi > 0)
923 		pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4);
924 	pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2);
925 }
926 #endif
927 
928 #ifdef PCI_HP
929 /*
930  * PCI-express HotPlug support.
931  */
932 static int pci_enable_pcie_hp = 1;
933 SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN,
934     &pci_enable_pcie_hp, 0,
935     "Enable support for native PCI-express HotPlug.");
936 
937 TASKQUEUE_DEFINE_THREAD(pci_hp);
938 
939 static void
940 pcib_probe_hotplug(struct pcib_softc *sc)
941 {
942 	device_t dev;
943 	uint32_t link_cap;
944 	uint16_t link_sta, slot_sta;
945 
946 	if (!pci_enable_pcie_hp)
947 		return;
948 
949 	dev = sc->dev;
950 	if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0)
951 		return;
952 
953 	if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT))
954 		return;
955 
956 	sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4);
957 
958 	if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0)
959 		return;
960 	link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
961 	if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0)
962 		return;
963 
964 	/*
965 	 * Some devices report that they have an MRL when they actually
966 	 * do not.  Since they always report that the MRL is open, child
967 	 * devices would be ignored.  Try to detect these devices and
968 	 * ignore their claim of HotPlug support.
969 	 *
970 	 * If there is an open MRL but the Data Link Layer is active,
971 	 * the MRL is not real.
972 	 */
973 	if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) {
974 		link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
975 		slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
976 		if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 &&
977 		    (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) {
978 			return;
979 		}
980 	}
981 
982 	/*
983 	 * Now that we're sure we want to do hot plug, ask the
984 	 * firmware, if any, if that's OK.
985 	 */
986 	if (pcib_request_feature(dev, PCI_FEATURE_HP) != 0) {
987 		if (bootverbose)
988 			device_printf(dev, "Unable to activate hot plug feature.\n");
989 		return;
990 	}
991 
992 	sc->flags |= PCIB_HOTPLUG;
993 }
994 
995 /*
996  * Send a HotPlug command to the slot control register.  If this slot
997  * uses command completion interrupts and a previous command is still
998  * in progress, then the command is dropped.  Once the previous
999  * command completes or times out, pcib_pcie_hotplug_update() will be
1000  * invoked to post a new command based on the slot's state at that
1001  * time.
1002  */
1003 static void
1004 pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask)
1005 {
1006 	device_t dev;
1007 	uint16_t ctl, new;
1008 
1009 	dev = sc->dev;
1010 
1011 	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING)
1012 		return;
1013 
1014 	ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
1015 	new = (ctl & ~mask) | val;
1016 	if (new == ctl)
1017 		return;
1018 	if (bootverbose)
1019 		device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new);
1020 	pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
1021 	if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) &&
1022 	    (ctl & new) & PCIEM_SLOT_CTL_CCIE) {
1023 		sc->flags |= PCIB_HOTPLUG_CMD_PENDING;
1024 		if (!cold)
1025 			taskqueue_enqueue_timeout(taskqueue_pci_hp,
1026 			    &sc->pcie_cc_task, hz);
1027 	}
1028 }
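
/*
 * Illustrative example for pcib_pcie_hotplug_command() above (added
 * for clarity, not in the original source): to switch the Power
 * Indicator on, pcib_pcie_hotplug_update() passes
 * val = PCIEM_SLOT_CTL_PI_ON with PCIEM_SLOT_CTL_PIC in the mask; only
 * the masked bits of the Slot Control register change, and when
 * Command Completed interrupts are in use a one second timeout is
 * armed to catch a lost completion.
 */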
1029 
1030 static void
1031 pcib_pcie_hotplug_command_completed(struct pcib_softc *sc)
1032 {
1033 	device_t dev;
1034 
1035 	dev = sc->dev;
1036 
1037 	if (bootverbose)
1038 		device_printf(dev, "Command Completed\n");
1039 	if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING))
1040 		return;
1041 	taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL);
1042 	sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
1043 	wakeup(sc);
1044 }
1045 
1046 /*
1047  * Returns true if a card is fully inserted from the user's
1048  * perspective.  It may not yet be ready for access, but the driver
1049  * can now start enabling access if necessary.
1050  */
1051 static bool
1052 pcib_hotplug_inserted(struct pcib_softc *sc)
1053 {
1054 
1055 	/* Pretend the card isn't present if a detach is forced. */
1056 	if (sc->flags & PCIB_DETACHING)
1057 		return (false);
1058 
1059 	/* Card must be present in the slot. */
1060 	if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0)
1061 		return (false);
1062 
1063 	/* A power fault implicitly turns off power to the slot. */
1064 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
1065 		return (false);
1066 
1067 	/* If the MRL is disengaged, the slot is powered off. */
1068 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP &&
1069 	    (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0)
1070 		return (false);
1071 
1072 	return (true);
1073 }
1074 
1075 /*
1076  * Returns -1 if the card is fully inserted, powered, and ready for
1077  * access.  Otherwise, returns 0.
1078  */
1079 static int
1080 pcib_hotplug_present(struct pcib_softc *sc)
1081 {
1082 
1083 	/* Card must be inserted. */
1084 	if (!pcib_hotplug_inserted(sc))
1085 		return (0);
1086 
1087 	/* Require the Data Link Layer to be active. */
1088 	if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE))
1089 		return (0);
1090 
1091 	return (-1);
1092 }
1093 
1094 static int pci_enable_pcie_ei = 0;
1095 SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_ei, CTLFLAG_RWTUN,
1096     &pci_enable_pcie_ei, 0,
1097     "Enable support for PCI-express Electromechanical Interlock.");
1098 
1099 static void
1100 pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
1101     bool schedule_task)
1102 {
1103 	bool card_inserted, ei_engaged;
1104 
1105 	/* Clear DETACHING if Presence Detect has cleared. */
1106 	if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) ==
1107 	    PCIEM_SLOT_STA_PDC)
1108 		sc->flags &= ~PCIB_DETACHING;
1109 
1110 	card_inserted = pcib_hotplug_inserted(sc);
1111 
1112 	/* Turn the power indicator on if a card is inserted. */
1113 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) {
1114 		mask |= PCIEM_SLOT_CTL_PIC;
1115 		if (card_inserted)
1116 			val |= PCIEM_SLOT_CTL_PI_ON;
1117 		else if (sc->flags & PCIB_DETACH_PENDING)
1118 			val |= PCIEM_SLOT_CTL_PI_BLINK;
1119 		else
1120 			val |= PCIEM_SLOT_CTL_PI_OFF;
1121 	}
1122 
1123 	/* Turn the power on via the Power Controller if a card is inserted. */
1124 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) {
1125 		mask |= PCIEM_SLOT_CTL_PCC;
1126 		if (card_inserted)
1127 			val |= PCIEM_SLOT_CTL_PC_ON;
1128 		else
1129 			val |= PCIEM_SLOT_CTL_PC_OFF;
1130 	}
1131 
1132 	/*
1133 	 * If a card is inserted, enable the Electromechanical
1134 	 * Interlock.  If a card is not inserted (or we are in the
1135 	 * process of detaching), disable the Electromechanical
1136 	 * Interlock.
1137 	 */
1138 	if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) &&
1139 	    pci_enable_pcie_ei) {
1140 		mask |= PCIEM_SLOT_CTL_EIC;
1141 		ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0;
1142 		if (card_inserted != ei_engaged)
1143 			val |= PCIEM_SLOT_CTL_EIC;
1144 	}
1145 
1146 	/*
1147 	 * Start a timer to detect if the Data Link Layer fails to
1148 	 * become active.  Note that we only start the timer if Presence
1149 	 * Detect or MRL Sensor changed on this interrupt.  Stop any
1150 	 * scheduled timer if the Data Link Layer is active.
1151 	 */
1152 	if (card_inserted &&
1153 	    !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) &&
1154 	    sc->pcie_slot_sta &
1155 	    (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) {
1156 		if (cold)
1157 			device_printf(sc->dev,
1158 			    "Data Link Layer inactive\n");
1159 		else
1160 			taskqueue_enqueue_timeout(taskqueue_pci_hp,
1161 			    &sc->pcie_dll_task, hz);
1162 	} else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)
1163 		taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_dll_task,
1164 		    NULL);
1165 
1166 	pcib_pcie_hotplug_command(sc, val, mask);
1167 
1168 	/*
1169 	 * During attach the child "pci" device is added synchronously;
1170 	 * otherwise, the task is scheduled to manage the child
1171 	 * device.
1172 	 */
1173 	if (schedule_task &&
1174 	    (pcib_hotplug_present(sc) != 0) != (sc->child != NULL))
1175 		taskqueue_enqueue(taskqueue_pci_hp, &sc->pcie_hp_task);
1176 }
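
/*
 * Typical hot-insertion sequence handled above (added for clarity, not
 * in the original source): a Presence Detect Changed interrupt causes
 * slot power to be applied and the power indicator to be turned on, a
 * timer is started while waiting for the Data Link Layer to become
 * active, and once DLL Active is reported the hotplug task attaches a
 * child "pci" bus for the new device.
 */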
1177 
1178 static void
1179 pcib_pcie_intr_hotplug(void *arg)
1180 {
1181 	struct pcib_softc *sc;
1182 	device_t dev;
1183 	uint16_t old_slot_sta;
1184 
1185 	sc = arg;
1186 	dev = sc->dev;
1187 	PCIB_HP_LOCK(sc);
1188 	old_slot_sta = sc->pcie_slot_sta;
1189 	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
1190 
1191 	/* Clear the events just reported. */
1192 	pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);
1193 
1194 	if (bootverbose)
1195 		device_printf(dev, "HotPlug interrupt: %#x\n",
1196 		    sc->pcie_slot_sta);
1197 
1198 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) {
1199 		if (sc->flags & PCIB_DETACH_PENDING) {
1200 			device_printf(dev,
1201 			    "Attention Button Pressed: Detach Cancelled\n");
1202 			sc->flags &= ~PCIB_DETACH_PENDING;
1203 			taskqueue_cancel_timeout(taskqueue_pci_hp,
1204 			    &sc->pcie_ab_task, NULL);
1205 		} else if (old_slot_sta & PCIEM_SLOT_STA_PDS) {
1206 			/* Only initiate detach sequence if device present. */
1207 			device_printf(dev,
1208 		    "Attention Button Pressed: Detaching in 5 seconds\n");
1209 			sc->flags |= PCIB_DETACH_PENDING;
1210 			taskqueue_enqueue_timeout(taskqueue_pci_hp,
1211 			    &sc->pcie_ab_task, 5 * hz);
1212 		}
1213 	}
1214 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
1215 		device_printf(dev, "Power Fault Detected\n");
1216 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC)
1217 		device_printf(dev, "MRL Sensor Changed to %s\n",
1218 		    sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" :
1219 		    "closed");
1220 	if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC)
1221 		device_printf(dev, "Presence Detect Changed to %s\n",
1222 		    sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" :
1223 		    "empty");
1224 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC)
1225 		pcib_pcie_hotplug_command_completed(sc);
1226 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) {
1227 		sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
1228 		if (bootverbose)
1229 			device_printf(dev,
1230 			    "Data Link Layer State Changed to %s\n",
1231 			    sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ?
1232 			    "active" : "inactive");
1233 	}
1234 
1235 	pcib_pcie_hotplug_update(sc, 0, 0, true);
1236 	PCIB_HP_UNLOCK(sc);
1237 }
1238 
1239 static void
1240 pcib_pcie_hotplug_task(void *context, int pending)
1241 {
1242 	struct pcib_softc *sc;
1243 	device_t dev;
1244 
1245 	sc = context;
1246 	PCIB_HP_LOCK(sc);
1247 	dev = sc->dev;
1248 	if (pcib_hotplug_present(sc) != 0) {
1249 		if (sc->child == NULL) {
1250 			sc->child = device_add_child(dev, "pci", -1);
1251 			bus_generic_attach(dev);
1252 		}
1253 	} else {
1254 		if (sc->child != NULL) {
1255 			if (device_delete_child(dev, sc->child) == 0)
1256 				sc->child = NULL;
1257 		}
1258 	}
1259 	PCIB_HP_UNLOCK(sc);
1260 }
1261 
1262 static void
1263 pcib_pcie_ab_timeout(void *arg, int pending)
1264 {
1265 	struct pcib_softc *sc = arg;
1266 
1267 	PCIB_HP_LOCK(sc);
1268 	if (sc->flags & PCIB_DETACH_PENDING) {
1269 		sc->flags |= PCIB_DETACHING;
1270 		sc->flags &= ~PCIB_DETACH_PENDING;
1271 		pcib_pcie_hotplug_update(sc, 0, 0, true);
1272 	}
1273 	PCIB_HP_UNLOCK(sc);
1274 }
1275 
1276 static void
1277 pcib_pcie_cc_timeout(void *arg, int pending)
1278 {
1279 	struct pcib_softc *sc = arg;
1280 	device_t dev = sc->dev;
1281 	uint16_t sta;
1282 
1283 	PCIB_HP_LOCK(sc);
1284 	sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
1285 	if (!(sta & PCIEM_SLOT_STA_CC)) {
1286 		device_printf(dev, "HotPlug Command Timed Out\n");
1287 		sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
1288 	} else {
1289 		device_printf(dev,
1290 	    "Missed HotPlug interrupt waiting for Command Completion\n");
1291 		pcib_pcie_intr_hotplug(sc);
1292 	}
1293 	PCIB_HP_UNLOCK(sc);
1294 }
1295 
1296 static void
1297 pcib_pcie_dll_timeout(void *arg, int pending)
1298 {
1299 	struct pcib_softc *sc = arg;
1300 	device_t dev = sc->dev;
1301 	uint16_t sta;
1302 
1303 	PCIB_HP_LOCK(sc);
1304 	sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
1305 	if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) {
1306 		device_printf(dev,
1307 		    "Timed out waiting for Data Link Layer Active\n");
1308 		sc->flags |= PCIB_DETACHING;
1309 		pcib_pcie_hotplug_update(sc, 0, 0, true);
1310 	} else if (sta != sc->pcie_link_sta) {
1311 		device_printf(dev,
1312 		    "Missed HotPlug interrupt waiting for DLL Active\n");
1313 		pcib_pcie_intr_hotplug(sc);
1314 	}
1315 	PCIB_HP_UNLOCK(sc);
1316 }
1317 
1318 static int
1319 pcib_alloc_pcie_irq(struct pcib_softc *sc)
1320 {
1321 	device_t dev;
1322 	int count, error, mem_rid, rid;
1323 
1324 	rid = -1;
1325 	dev = sc->dev;
1326 
1327 	/*
1328 	 * For simplicity, only use MSI-X if there is a single message.
1329 	 * To support a device with multiple messages we would have to
1330 	 * use remap intr if the MSI number is not 0.
1331 	 */
1332 	count = pci_msix_count(dev);
1333 	if (count == 1) {
1334 		mem_rid = pci_msix_table_bar(dev);
1335 		sc->pcie_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1336 		    &mem_rid, RF_ACTIVE);
1337 		if (sc->pcie_mem == NULL) {
1338 			device_printf(dev,
1339 			    "Failed to allocate BAR for MSI-X table\n");
1340 		} else {
1341 			error = pci_alloc_msix(dev, &count);
1342 			if (error == 0)
1343 				rid = 1;
1344 		}
1345 	}
1346 
1347 	if (rid < 0 && pci_msi_count(dev) > 0) {
1348 		count = 1;
1349 		error = pci_alloc_msi(dev, &count);
1350 		if (error == 0)
1351 			rid = 1;
1352 	}
1353 
1354 	if (rid < 0)
1355 		rid = 0;
1356 
1357 	sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1358 	    RF_ACTIVE | RF_SHAREABLE);
1359 	if (sc->pcie_irq == NULL) {
1360 		device_printf(dev,
1361 		    "Failed to allocate interrupt for PCI-e events\n");
1362 		if (rid > 0)
1363 			pci_release_msi(dev);
1364 		return (ENXIO);
1365 	}
1366 
1367 	error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC|INTR_MPSAFE,
1368 	    NULL, pcib_pcie_intr_hotplug, sc, &sc->pcie_ihand);
1369 	if (error) {
1370 		device_printf(dev, "Failed to setup PCI-e interrupt handler\n");
1371 		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq);
1372 		if (rid > 0)
1373 			pci_release_msi(dev);
1374 		return (error);
1375 	}
1376 	return (0);
1377 }
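
/*
 * Note (added for clarity, not in the original source): the function
 * above prefers a single MSI-X message (allocating the table BAR
 * first), falls back to a single MSI message, and finally uses the
 * legacy INTx interrupt at rid 0 if neither is available.
 */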
1378 
1379 static int
1380 pcib_release_pcie_irq(struct pcib_softc *sc)
1381 {
1382 	device_t dev;
1383 	int error;
1384 
1385 	dev = sc->dev;
1386 	error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand);
1387 	if (error)
1388 		return (error);
1389 	error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq);
1390 	if (error)
1391 		return (error);
1392 	error = pci_release_msi(dev);
1393 	if (error)
1394 		return (error);
1395 	if (sc->pcie_mem != NULL)
1396 		error = bus_free_resource(dev, SYS_RES_MEMORY, sc->pcie_mem);
1397 	return (error);
1398 }
1399 
1400 static void
1401 pcib_setup_hotplug(struct pcib_softc *sc)
1402 {
1403 	device_t dev;
1404 	uint16_t mask, val;
1405 
1406 	dev = sc->dev;
1407 	TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc);
1408 	TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_ab_task, 0,
1409 	    pcib_pcie_ab_timeout, sc);
1410 	TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_cc_task, 0,
1411 	    pcib_pcie_cc_timeout, sc);
1412 	TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_dll_task, 0,
1413 	    pcib_pcie_dll_timeout, sc);
1414 	sc->pcie_hp_lock = bus_topo_mtx();
1415 
1416 	/* Allocate IRQ. */
1417 	if (pcib_alloc_pcie_irq(sc) != 0)
1418 		return;
1419 
1420 	sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
1421 	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
1422 
1423 	/* Clear any events previously pending. */
1424 	pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);
1425 
1426 	/* Enable HotPlug events. */
1427 	mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
1428 	    PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
1429 	    PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
1430 	val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE;
1431 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB)
1432 		val |= PCIEM_SLOT_CTL_ABPE;
1433 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP)
1434 		val |= PCIEM_SLOT_CTL_PFDE;
1435 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP)
1436 		val |= PCIEM_SLOT_CTL_MRLSCE;
1437 	if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS))
1438 		val |= PCIEM_SLOT_CTL_CCIE;
1439 
1440 	/* Turn the attention indicator off. */
1441 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
1442 		mask |= PCIEM_SLOT_CTL_AIC;
1443 		val |= PCIEM_SLOT_CTL_AI_OFF;
1444 	}
1445 
1446 	pcib_pcie_hotplug_update(sc, val, mask, false);
1447 }
1448 
1449 static int
1450 pcib_detach_hotplug(struct pcib_softc *sc)
1451 {
1452 	uint16_t mask, val;
1453 	int error;
1454 
1455 	/* Disable the card in the slot and force it to detach. */
1456 	if (sc->flags & PCIB_DETACH_PENDING) {
1457 		sc->flags &= ~PCIB_DETACH_PENDING;
1458 		taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task,
1459 		    NULL);
1460 	}
1461 	sc->flags |= PCIB_DETACHING;
1462 
1463 	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
1464 		taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task,
1465 		    NULL);
1466 		tsleep(sc, 0, "hpcmd", hz);
1467 		sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
1468 	}
1469 
1470 	/* Disable HotPlug events. */
1471 	mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
1472 	    PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
1473 	    PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
1474 	val = 0;
1475 
1476 	/* Turn the attention indicator off. */
1477 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
1478 		mask |= PCIEM_SLOT_CTL_AIC;
1479 		val |= PCIEM_SLOT_CTL_AI_OFF;
1480 	}
1481 
1482 	pcib_pcie_hotplug_update(sc, val, mask, false);
1483 
1484 	error = pcib_release_pcie_irq(sc);
1485 	if (error)
1486 		return (error);
1487 	taskqueue_drain(taskqueue_pci_hp, &sc->pcie_hp_task);
1488 	taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_ab_task);
1489 	taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_cc_task);
1490 	taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_dll_task);
1491 	return (0);
1492 }
1493 #endif
1494 
1495 /*
1496  * Get current bridge configuration.
1497  */
1498 static void
1499 pcib_cfg_save(struct pcib_softc *sc)
1500 {
1501 #ifndef NEW_PCIB
1502 	device_t	dev;
1503 	uint16_t command;
1504 
1505 	dev = sc->dev;
1506 
1507 	command = pci_read_config(dev, PCIR_COMMAND, 2);
1508 	if (command & PCIM_CMD_PORTEN)
1509 		pcib_get_io_decode(sc);
1510 	if (command & PCIM_CMD_MEMEN)
1511 		pcib_get_mem_decode(sc);
1512 #endif
1513 }
1514 
1515 /*
1516  * Restore previous bridge configuration.
1517  */
1518 static void
1519 pcib_cfg_restore(struct pcib_softc *sc)
1520 {
1521 #ifndef NEW_PCIB
1522 	uint16_t command;
1523 #endif
1524 
1525 #ifdef NEW_PCIB
1526 	pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM);
1527 #else
1528 	command = pci_read_config(sc->dev, PCIR_COMMAND, 2);
1529 	if (command & PCIM_CMD_PORTEN)
1530 		pcib_set_io_decode(sc);
1531 	if (command & PCIM_CMD_MEMEN)
1532 		pcib_set_mem_decode(sc);
1533 #endif
1534 }
1535 
1536 /*
1537  * Generic device interface
1538  */
1539 static int
1540 pcib_probe(device_t dev)
1541 {
1542     if ((pci_get_class(dev) == PCIC_BRIDGE) &&
1543 	(pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) {
1544 	device_set_desc(dev, "PCI-PCI bridge");
1545 	return(-10000);
1546     }
1547     return(ENXIO);
1548 }
1549 
1550 void
1551 pcib_attach_common(device_t dev)
1552 {
1553     struct pcib_softc	*sc;
1554     struct sysctl_ctx_list *sctx;
1555     struct sysctl_oid	*soid;
1556     int comma;
1557 
1558     sc = device_get_softc(dev);
1559     sc->dev = dev;
1560 
1561     /*
1562      * Get current bridge configuration.
1563      */
1564     sc->domain = pci_get_domain(dev);
1565 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
1566     sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1);
1567     sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
1568 #endif
1569     sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2);
1570     pcib_cfg_save(sc);
1571 
1572     /*
1573      * The primary bus register should always be the bus of the
1574      * parent.
1575      */
1576     sc->pribus = pci_get_bus(dev);
1577     pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1);
1578 
1579     /*
1580      * Setup sysctl reporting nodes
1581      */
1582     sctx = device_get_sysctl_ctx(dev);
1583     soid = device_get_sysctl_tree(dev);
1584     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain",
1585       CTLFLAG_RD, &sc->domain, 0, "Domain number");
1586     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus",
1587       CTLFLAG_RD, &sc->pribus, 0, "Primary bus number");
1588     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus",
1589       CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number");
1590     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus",
1591       CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number");
1592 
1593     /*
1594      * Quirk handling.
1595      */
1596     switch (pci_get_devid(dev)) {
1597 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
1598     case 0x12258086:		/* Intel 82454KX/GX (Orion) */
1599 	{
1600 	    uint8_t	supbus;
1601 
1602 	    supbus = pci_read_config(dev, 0x41, 1);
1603 	    if (supbus != 0xff) {
1604 		sc->bus.sec = supbus + 1;
1605 		sc->bus.sub = supbus + 1;
1606 	    }
1607 	    break;
1608 	}
1609 #endif
1610 
1611     /*
1612      * The i82380FB mobile docking controller is a PCI-PCI bridge,
1613      * and it is a subtractive bridge.  However, the ProgIf is wrong
1614      * so the normal setting of the PCIB_SUBTRACTIVE bit doesn't
1615      * happen.  There are also Toshiba and Cavium ThunderX bridges
1616      * that behave this way.
1617      */
1618     case 0xa002177d:		/* Cavium ThunderX */
1619     case 0x124b8086:		/* Intel 82380FB Mobile */
1620     case 0x060513d7:		/* Toshiba ???? */
1621 	sc->flags |= PCIB_SUBTRACTIVE;
1622 	break;
1623 
1624 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
1625     /* Compaq R3000 BIOS sets wrong subordinate bus number. */
1626     case 0x00dd10de:
1627 	{
1628 	    char *cp;
1629 
1630 	    if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
1631 		break;
1632 	    if (strncmp(cp, "Compal", 6) != 0) {
1633 		freeenv(cp);
1634 		break;
1635 	    }
1636 	    freeenv(cp);
1637 	    if ((cp = kern_getenv("smbios.planar.product")) == NULL)
1638 		break;
1639 	    if (strncmp(cp, "08A0", 4) != 0) {
1640 		freeenv(cp);
1641 		break;
1642 	    }
1643 	    freeenv(cp);
1644 	    if (sc->bus.sub < 0xa) {
1645 		pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1);
1646 		sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
1647 	    }
1648 	    break;
1649 	}
1650 #endif
1651     }
1652 
1653     if (pci_msi_device_blacklisted(dev))
1654 	sc->flags |= PCIB_DISABLE_MSI;
1655 
1656     if (pci_msix_device_blacklisted(dev))
1657 	sc->flags |= PCIB_DISABLE_MSIX;
1658 
1659     /*
1660      * Intel 815, 845 and other chipsets say they are PCI-PCI bridges,
1661      * but have a ProgIF of 0x80.  The 82801 family (AA, AB, BAM/CAM,
1662      * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese.
1663      * This means they act as if they were subtractively decoding
1664      * bridges and pass all transactions.  Mark them and real ProgIf 1
1665      * parts as subtractive.
1666      */
1667     if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 ||
1668       pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE)
1669 	sc->flags |= PCIB_SUBTRACTIVE;
1670 
1671 #ifdef PCI_HP
1672     pcib_probe_hotplug(sc);
1673 #endif
1674 #ifdef NEW_PCIB
1675 #ifdef PCI_RES_BUS
1676     pcib_setup_secbus(dev, &sc->bus, 1);
1677 #endif
1678     pcib_probe_windows(sc);
1679 #endif
1680 #ifdef PCI_HP
1681     if (sc->flags & PCIB_HOTPLUG)
1682 	    pcib_setup_hotplug(sc);
1683 #endif
1684     if (bootverbose) {
1685 	device_printf(dev, "  domain            %d\n", sc->domain);
1686 	device_printf(dev, "  secondary bus     %d\n", sc->bus.sec);
1687 	device_printf(dev, "  subordinate bus   %d\n", sc->bus.sub);
1688 #ifdef NEW_PCIB
1689 	if (pcib_is_window_open(&sc->io))
1690 	    device_printf(dev, "  I/O decode        0x%jx-0x%jx\n",
1691 	      (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit);
1692 	if (pcib_is_window_open(&sc->mem))
1693 	    device_printf(dev, "  memory decode     0x%jx-0x%jx\n",
1694 	      (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit);
1695 	if (pcib_is_window_open(&sc->pmem))
1696 	    device_printf(dev, "  prefetched decode 0x%jx-0x%jx\n",
1697 	      (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit);
1698 #else
1699 	if (pcib_is_io_open(sc))
1700 	    device_printf(dev, "  I/O decode        0x%x-0x%x\n",
1701 	      sc->iobase, sc->iolimit);
1702 	if (pcib_is_nonprefetch_open(sc))
1703 	    device_printf(dev, "  memory decode     0x%jx-0x%jx\n",
1704 	      (uintmax_t)sc->membase, (uintmax_t)sc->memlimit);
1705 	if (pcib_is_prefetch_open(sc))
1706 	    device_printf(dev, "  prefetched decode 0x%jx-0x%jx\n",
1707 	      (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
1708 #endif
1709 	if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) ||
1710 	    sc->flags & PCIB_SUBTRACTIVE) {
1711 		device_printf(dev, "  special decode    ");
1712 		comma = 0;
1713 		if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) {
1714 			printf("ISA");
1715 			comma = 1;
1716 		}
1717 		if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) {
1718 			printf("%sVGA", comma ? ", " : "");
1719 			comma = 1;
1720 		}
1721 		if (sc->flags & PCIB_SUBTRACTIVE)
1722 			printf("%ssubtractive", comma ? ", " : "");
1723 		printf("\n");
1724 	}
1725     }
1726 
1727     /*
1728      * Always enable busmastering on bridges so that transactions
1729      * initiated on the secondary bus are passed through to the
1730      * primary bus.
1731      */
1732     pci_enable_busmaster(dev);
1733 }
1734 
1735 #ifdef PCI_HP
1736 static int
1737 pcib_present(struct pcib_softc *sc)
1738 {
1739 
1740 	if (sc->flags & PCIB_HOTPLUG)
1741 		return (pcib_hotplug_present(sc) != 0);
1742 	return (1);
1743 }
1744 #endif
1745 
1746 int
1747 pcib_attach_child(device_t dev)
1748 {
1749 	struct pcib_softc *sc;
1750 
1751 	sc = device_get_softc(dev);
1752 	if (sc->bus.sec == 0) {
1753 		/* no secondary bus; we should have fixed this */
1754 		return(0);
1755 	}
1756 
1757 #ifdef PCI_HP
1758 	if (!pcib_present(sc)) {
1759 		/* An empty HotPlug slot, so don't add a PCI bus yet. */
1760 		return (0);
1761 	}
1762 #endif
1763 
1764 	sc->child = device_add_child(dev, "pci", -1);
1765 	return (bus_generic_attach(dev));
1766 }
1767 
1768 int
1769 pcib_attach(device_t dev)
1770 {
1771 
1772     pcib_attach_common(dev);
1773     return (pcib_attach_child(dev));
1774 }
1775 
1776 int
1777 pcib_detach(device_t dev)
1778 {
1779 #if defined(PCI_HP) || defined(NEW_PCIB)
1780 	struct pcib_softc *sc;
1781 #endif
1782 	int error;
1783 
1784 #if defined(PCI_HP) || defined(NEW_PCIB)
1785 	sc = device_get_softc(dev);
1786 #endif
1787 	error = bus_generic_detach(dev);
1788 	if (error)
1789 		return (error);
1790 #ifdef PCI_HP
1791 	if (sc->flags & PCIB_HOTPLUG) {
1792 		error = pcib_detach_hotplug(sc);
1793 		if (error)
1794 			return (error);
1795 	}
1796 #endif
1797 	error = device_delete_children(dev);
1798 	if (error)
1799 		return (error);
1800 #ifdef NEW_PCIB
1801 	pcib_free_windows(sc);
1802 #ifdef PCI_RES_BUS
1803 	pcib_free_secbus(dev, &sc->bus);
1804 #endif
1805 #endif
1806 	return (0);
1807 }
1808 
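/* Save the bridge's config registers before suspending the child bus. */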
1809 int
1810 pcib_suspend(device_t dev)
1811 {
1812 
1813 	pcib_cfg_save(device_get_softc(dev));
1814 	return (bus_generic_suspend(dev));
1815 }
1816 
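/* Restore the saved configuration, then the Command register, and resume. */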
1817 int
1818 pcib_resume(device_t dev)
1819 {
1820 
1821 	pcib_cfg_restore(device_get_softc(dev));
1822 
1823 	/*
1824 	 * Restore the Command register only after restoring the windows.
1825 	 * The bridge should not be claiming random windows.
1826 	 */
1827 	pci_write_config(dev, PCIR_COMMAND, pci_get_cmdreg(dev), 2);
1828 	return (bus_generic_resume(dev));
1829 }
1830 
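/*
 * Program every window with its base above its limit so that the bridge
 * decodes nothing until it is configured.
 */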
1831 void
1832 pcib_bridge_init(device_t dev)
1833 {
1834 	pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
1835 	pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2);
1836 	pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1);
1837 	pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2);
1838 	pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2);
1839 	pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2);
1840 	pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
1841 	pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4);
1842 	pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2);
1843 	pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4);
1844 }
1845 
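/*
 * A child is present only if the bridge itself is present and, for HotPlug
 * bridges, a card is seated in the slot.
 */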
1846 int
1847 pcib_child_present(device_t dev, device_t child)
1848 {
1849 #ifdef PCI_HP
1850 	struct pcib_softc *sc = device_get_softc(dev);
1851 	int retval;
1852 
1853 	retval = bus_child_present(dev);
1854 	if (retval != 0 && sc->flags & PCIB_HOTPLUG)
1855 		retval = pcib_hotplug_present(sc);
1856 	return (retval);
1857 #else
1858 	return (bus_child_present(dev));
1859 #endif
1860 }
1861 
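/* Export the PCI domain and secondary bus number to the child bus. */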
1862 int
1863 pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1864 {
1865     struct pcib_softc	*sc = device_get_softc(dev);
1866 
1867     switch (which) {
1868     case PCIB_IVAR_DOMAIN:
1869 	*result = sc->domain;
1870 	return(0);
1871     case PCIB_IVAR_BUS:
1872 	*result = sc->bus.sec;
1873 	return(0);
1874     }
1875     return(ENOENT);
1876 }
1877 
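/* The domain and secondary bus number are read-only from the child's side. */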
1878 int
1879 pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
1880 {
1881 
1882     switch (which) {
1883     case PCIB_IVAR_DOMAIN:
1884 	return(EINVAL);
1885     case PCIB_IVAR_BUS:
1886 	return(EINVAL);
1887     }
1888     return(ENOENT);
1889 }
1890 
1891 #ifdef NEW_PCIB
1892 /*
1893  * Attempt to allocate a resource from the existing resources assigned
1894  * to a window.
1895  */
1896 static struct resource *
1897 pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w,
1898     device_t child, int type, int *rid, rman_res_t start, rman_res_t end,
1899     rman_res_t count, u_int flags)
1900 {
1901 	struct resource *res;
1902 
1903 	if (!pcib_is_window_open(w))
1904 		return (NULL);
1905 
1906 	res = rman_reserve_resource(&w->rman, start, end, count,
1907 	    flags & ~RF_ACTIVE, child);
1908 	if (res == NULL)
1909 		return (NULL);
1910 
1911 	if (bootverbose)
1912 		device_printf(sc->dev,
1913 		    "allocated %s range (%#jx-%#jx) for rid %x of %s\n",
1914 		    w->name, rman_get_start(res), rman_get_end(res), *rid,
1915 		    pcib_child_name(child));
1916 	rman_set_rid(res, *rid);
1917 
1918 	/*
1919 	 * If the resource should be active, pass that request up the
1920 	 * tree.  This assumes the parent drivers can handle
1921 	 * activating sub-allocated resources.
1922 	 */
1923 	if (flags & RF_ACTIVE) {
1924 		if (bus_activate_resource(child, type, *rid, res) != 0) {
1925 			rman_release_resource(res);
1926 			return (NULL);
1927 		}
1928 	}
1929 
1930 	return (res);
1931 }
1932 
1933 /* Allocate a fresh resource range for an unconfigured window. */
1934 static int
1935 pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type,
1936     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1937 {
1938 	struct resource *res;
1939 	rman_res_t base, limit, wmask;
1940 	int rid;
1941 
1942 	/*
1943 	 * If this is an I/O window on a bridge with ISA enable set
1944 	 * and the start address is below 64k, then try to allocate an
1945 	 * initial window 0x1000 bytes long starting at address
1946 	 * 0xf000 and walking down.  Note that if the original request
1947 	 * was larger than the non-aliased range size of 0x100, our
1948 	 * caller would have raised the start address up to 64k
1949 	 * already.
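	 * For example, with ISA enable set only 0xf000-0xf0ff,
	 * 0xf400-0xf4ff, 0xf800-0xf8ff and 0xfc00-0xfcff of a
	 * 0xf000-0xffff window are forwarded to the secondary bus.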
1950 	 */
1951 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
1952 	    start < 65536) {
1953 		for (base = 0xf000; (long)base >= 0; base -= 0x1000) {
1954 			limit = base + 0xfff;
1955 
1956 			/*
1957 			 * Skip ranges that wouldn't work for the
1958 			 * original request.  Note that the ranges that
1959 			 * actually overlap are the non-alias ranges
1960 			 * within [base, limit], so this isn't quite a
1961 			 * simple comparison.
1962 			 */
1963 			if (start + count > limit - 0x400)
1964 				continue;
1965 			if (base == 0) {
1966 				/*
1967 				 * The first open region for the window at
1968 				 * 0 is 0x400-0x4ff.
1969 				 */
1970 				if (end - count + 1 < 0x400)
1971 					continue;
1972 			} else {
1973 				if (end - count + 1 < base)
1974 					continue;
1975 			}
1976 
1977 			if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) {
1978 				w->base = base;
1979 				w->limit = limit;
1980 				return (0);
1981 			}
1982 		}
1983 		return (ENOSPC);
1984 	}
1985 
1986 	wmask = ((rman_res_t)1 << w->step) - 1;
1987 	if (RF_ALIGNMENT(flags) < w->step) {
1988 		flags &= ~RF_ALIGNMENT_MASK;
1989 		flags |= RF_ALIGNMENT_LOG2(w->step);
1990 	}
1991 	start &= ~wmask;
1992 	end |= wmask;
1993 	count = roundup2(count, (rman_res_t)1 << w->step);
1994 	rid = w->reg;
1995 	res = bus_alloc_resource(sc->dev, type, &rid, start, end, count,
1996 	    flags & ~RF_ACTIVE);
1997 	if (res == NULL)
1998 		return (ENOSPC);
1999 	pcib_add_window_resources(w, &res, 1);
2000 	pcib_activate_window(sc, type);
2001 	w->base = rman_get_start(res);
2002 	w->limit = rman_get_end(res);
2003 	return (0);
2004 }
2005 
2006 /* Try to expand an existing window to the requested base and limit. */
2007 static int
2008 pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type,
2009     rman_res_t base, rman_res_t limit)
2010 {
2011 	struct resource *res;
2012 	int error, i, force_64k_base;
2013 
2014 	KASSERT(base <= w->base && limit >= w->limit,
2015 	    ("attempting to shrink window"));
2016 
2017 	/*
2018 	 * XXX: pcib_grow_window() doesn't try to do this anyway and
2019 	 * the error handling for all the edge cases would be tedious.
2020 	 */
2021 	KASSERT(limit == w->limit || base == w->base,
2022 	    ("attempting to grow both ends of a window"));
2023 
2024 	/*
2025 	 * Yet more special handling for requests to expand an I/O
2026 	 * window behind an ISA-enabled bridge.  Since I/O windows
2027 	 * have to grow in 0x1000 increments and the end of the 0xffff
2028 	 * range is an alias, growing a window below 64k will always
2029 	 * result in allocating new resources and never adjusting an
2030 	 * existing resource.
2031 	 */
2032 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
2033 	    (limit <= 65535 || (base <= 65535 && base != w->base))) {
2034 		KASSERT(limit == w->limit || limit <= 65535,
2035 		    ("attempting to grow both ends across 64k ISA alias"));
2036 
2037 		if (base != w->base)
2038 			error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1);
2039 		else
2040 			error = pcib_alloc_nonisa_ranges(sc, w->limit + 1,
2041 			    limit);
2042 		if (error == 0) {
2043 			w->base = base;
2044 			w->limit = limit;
2045 		}
2046 		return (error);
2047 	}
2048 
2049 	/*
2050 	 * Find the existing resource to adjust.  Usually there is only one,
2051 	 * but for an ISA-enabled bridge we might be growing the I/O window
2052 	 * above 64k and need to find the existing resource that maps all
2053 	 * of the area above 64k.
2054 	 */
2055 	for (i = 0; i < w->count; i++) {
2056 		if (rman_get_end(w->res[i]) == w->limit)
2057 			break;
2058 	}
2059 	KASSERT(i != w->count, ("did not find existing resource"));
2060 	res = w->res[i];
2061 
2062 	/*
2063 	 * Usually the resource we found should match the window's
2064 	 * existing range.  The one exception is the ISA-enabled case
2065 	 * mentioned above in which case the resource should start at
2066 	 * 64k.
2067 	 */
2068 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
2069 	    w->base <= 65535) {
2070 		KASSERT(rman_get_start(res) == 65536,
2071 		    ("existing resource mismatch"));
2072 		force_64k_base = 1;
2073 	} else {
2074 		KASSERT(w->base == rman_get_start(res),
2075 		    ("existing resource mismatch"));
2076 		force_64k_base = 0;
2077 	}
2078 
2079 	error = bus_adjust_resource(sc->dev, type, res, force_64k_base ?
2080 	    rman_get_start(res) : base, limit);
2081 	if (error)
2082 		return (error);
2083 
2084 	/* Add the newly allocated region to the resource manager. */
2085 	if (w->base != base) {
2086 		error = rman_manage_region(&w->rman, base, w->base - 1);
2087 		w->base = base;
2088 	} else {
2089 		error = rman_manage_region(&w->rman, w->limit + 1, limit);
2090 		w->limit = limit;
2091 	}
2092 	if (error) {
2093 		if (bootverbose)
2094 			device_printf(sc->dev,
2095 			    "failed to expand %s resource manager\n", w->name);
2096 		(void)bus_adjust_resource(sc->dev, type, res, force_64k_base ?
2097 		    rman_get_start(res) : w->base, w->limit);
2098 	}
2099 	return (error);
2100 }
2101 
2102 /*
2103  * Attempt to grow a window to make room for a given resource request.
2104  */
2105 static int
2106 pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
2107     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2108 {
2109 	rman_res_t align, start_free, end_free, front, back, wmask;
2110 	int error;
2111 
2112 	/*
2113 	 * Clamp the desired resource range to the maximum address
2114 	 * this window supports.  Reject impossible requests.
2115 	 *
2116 	 * For I/O port requests behind a bridge with the ISA enable
2117 	 * bit set, force large allocations to start above 64k.
2118 	 */
2119 	if (!w->valid)
2120 		return (EINVAL);
2121 	if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 &&
2122 	    start < 65536)
2123 		start = 65536;
2124 	if (end > w->rman.rm_end)
2125 		end = w->rman.rm_end;
2126 	if (start + count - 1 > end || start + count < start)
2127 		return (EINVAL);
2128 	wmask = ((rman_res_t)1 << w->step) - 1;
2129 
2130 	/*
2131 	 * If there is no resource at all, just try to allocate enough
2132 	 * aligned space for this resource.
2133 	 */
2134 	if (w->res == NULL) {
2135 		error = pcib_alloc_new_window(sc, w, type, start, end, count,
2136 		    flags);
2137 		if (error) {
2138 			if (bootverbose)
2139 				device_printf(sc->dev,
2140 		    "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n",
2141 				    w->name, start, end, count);
2142 			return (error);
2143 		}
2144 		if (bootverbose)
2145 			device_printf(sc->dev,
2146 			    "allocated initial %s window of %#jx-%#jx\n",
2147 			    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
2148 		goto updatewin;
2149 	}
2150 
2151 	/*
2152 	 * See if growing the window would help.  Compute the minimum
2153 	 * amount of address space needed on both the front and back
2154 	 * ends of the existing window to satisfy the allocation.
2155 	 *
2156 	 * For each end, build a candidate region adjusting for the
2157 	 * required alignment, etc.  If there is a free region at the
2158 	 * edge of the window, grow from the inner edge of the free
2159 	 * region.  Otherwise grow from the window boundary.
2160 	 *
2161 	 * Growing an I/O window below 64k for a bridge with the ISA
2162 	 * enable bit doesn't require any special magic as the step
2163 	 * size of an I/O window (4k) always includes multiple
2164 	 * non-alias ranges when it is grown in either direction.
2165 	 *
2166 	 * XXX: Special case: if w->res is completely empty and the
2167 	 * request size is larger than w->res, we should find the
2168 	 * optimal aligned buffer containing w->res and allocate that.
2169 	 */
2170 	if (bootverbose)
2171 		device_printf(sc->dev,
2172 		    "attempting to grow %s window for (%#jx-%#jx,%#jx)\n",
2173 		    w->name, start, end, count);
2174 	align = (rman_res_t)1 << RF_ALIGNMENT(flags);
2175 	if (start < w->base) {
2176 		if (rman_first_free_region(&w->rman, &start_free, &end_free) !=
2177 		    0 || start_free != w->base)
2178 			end_free = w->base;
2179 		if (end_free > end)
2180 			end_free = end + 1;
2181 
2182 		/* Move end_free down until it is properly aligned. */
2183 		end_free &= ~(align - 1);
2184 		end_free--;
2185 		front = end_free - (count - 1);
2186 
2187 		/*
2188 		 * The resource would now be allocated at (front,
2189 		 * end_free).  Ensure that fits in the (start, end)
2190 		 * bounds.  end_free is checked above.  If 'front' is
2191 		 * ok, ensure it is properly aligned for this window.
2192 		 * Also check for underflow.
2193 		 */
2194 		if (front >= start && front <= end_free) {
2195 			if (bootverbose)
2196 				printf("\tfront candidate range: %#jx-%#jx\n",
2197 				    front, end_free);
2198 			front &= ~wmask;
2199 			front = w->base - front;
2200 		} else
2201 			front = 0;
2202 	} else
2203 		front = 0;
2204 	if (end > w->limit) {
2205 		if (rman_last_free_region(&w->rman, &start_free, &end_free) !=
2206 		    0 || end_free != w->limit)
2207 			start_free = w->limit + 1;
2208 		if (start_free < start)
2209 			start_free = start;
2210 
2211 		/* Move start_free up until it is properly aligned. */
2212 		start_free = roundup2(start_free, align);
2213 		back = start_free + count - 1;
2214 
2215 		/*
2216 		 * The resource would now be allocated at (start_free,
2217 		 * back).  Ensure that fits in the (start, end)
2218 		 * bounds.  start_free is checked above.  If 'back' is
2219 		 * ok, ensure it is properly aligned for this window.
2220 		 * Also check for overflow.
2221 		 */
2222 		if (back <= end && start_free <= back) {
2223 			if (bootverbose)
2224 				printf("\tback candidate range: %#jx-%#jx\n",
2225 				    start_free, back);
2226 			back |= wmask;
2227 			back -= w->limit;
2228 		} else
2229 			back = 0;
2230 	} else
2231 		back = 0;
2232 
2233 	/*
2234 	 * Try to allocate the smallest needed region first.
2235 	 * If that fails, fall back to the other region.
2236 	 */
2237 	error = ENOSPC;
2238 	while (front != 0 || back != 0) {
2239 		if (front != 0 && (front <= back || back == 0)) {
2240 			error = pcib_expand_window(sc, w, type, w->base - front,
2241 			    w->limit);
2242 			if (error == 0)
2243 				break;
2244 			front = 0;
2245 		} else {
2246 			error = pcib_expand_window(sc, w, type, w->base,
2247 			    w->limit + back);
2248 			if (error == 0)
2249 				break;
2250 			back = 0;
2251 		}
2252 	}
2253 
2254 	if (error)
2255 		return (error);
2256 	if (bootverbose)
2257 		device_printf(sc->dev, "grew %s window to %#jx-%#jx\n",
2258 		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
2259 
2260 updatewin:
2261 	/* Write the new window. */
2262 	KASSERT((w->base & wmask) == 0, ("start address is not aligned"));
2263 	KASSERT((w->limit & wmask) == wmask, ("end address is not aligned"));
2264 	pcib_write_windows(sc, w->mask);
2265 	return (0);
2266 }
2267 
2268 /*
2269  * We have to trap resource allocation requests and ensure that the bridge
2270  * is set up to handle them, or is capable of doing so.
2271  */
2272 struct resource *
2273 pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
2274     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2275 {
2276 	struct pcib_softc *sc;
2277 	struct resource *r;
2278 
2279 	sc = device_get_softc(dev);
2280 
2281 	/*
2282 	 * VGA resources are decoded iff the VGA enable bit is set in
2283 	 * the bridge control register.  VGA resources do not fall into
2284 	 * the resource windows and are passed up to the parent.
2285 	 */
2286 	if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) ||
2287 	    (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) {
2288 		if (sc->bridgectl & PCIB_BCR_VGA_ENABLE)
2289 			return (bus_generic_alloc_resource(dev, child, type,
2290 			    rid, start, end, count, flags));
2291 		else
2292 			return (NULL);
2293 	}
2294 
2295 	switch (type) {
2296 #ifdef PCI_RES_BUS
2297 	case PCI_RES_BUS:
2298 		return (pcib_alloc_subbus(&sc->bus, child, rid, start, end,
2299 		    count, flags));
2300 #endif
2301 	case SYS_RES_IOPORT:
2302 		if (pcib_is_isa_range(sc, start, end, count))
2303 			return (NULL);
2304 		r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start,
2305 		    end, count, flags);
2306 		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
2307 			break;
2308 		if (pcib_grow_window(sc, &sc->io, type, start, end, count,
2309 		    flags) == 0)
2310 			r = pcib_suballoc_resource(sc, &sc->io, child, type,
2311 			    rid, start, end, count, flags);
2312 		break;
2313 	case SYS_RES_MEMORY:
2314 		/*
2315 		 * For prefetchable resources, prefer the prefetchable
2316 		 * memory window, but fall back to the regular memory
2317 		 * window if that fails.  Try both windows before
2318 		 * attempting to grow a window in case the firmware
2319 		 * has used a range in the regular memory window to
2320 		 * map a prefetchable BAR.
2321 		 */
2322 		if (flags & RF_PREFETCHABLE) {
2323 			r = pcib_suballoc_resource(sc, &sc->pmem, child, type,
2324 			    rid, start, end, count, flags);
2325 			if (r != NULL)
2326 				break;
2327 		}
2328 		r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid,
2329 		    start, end, count, flags);
2330 		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
2331 			break;
2332 		if (flags & RF_PREFETCHABLE) {
2333 			if (pcib_grow_window(sc, &sc->pmem, type, start, end,
2334 			    count, flags) == 0) {
2335 				r = pcib_suballoc_resource(sc, &sc->pmem, child,
2336 				    type, rid, start, end, count, flags);
2337 				if (r != NULL)
2338 					break;
2339 			}
2340 		}
2341 		if (pcib_grow_window(sc, &sc->mem, type, start, end, count,
2342 		    flags & ~RF_PREFETCHABLE) == 0)
2343 			r = pcib_suballoc_resource(sc, &sc->mem, child, type,
2344 			    rid, start, end, count, flags);
2345 		break;
2346 	default:
2347 		return (bus_generic_alloc_resource(dev, child, type, rid,
2348 		    start, end, count, flags));
2349 	}
2350 
2351 	/*
2352 	 * If attempts to suballocate from the window fail but this is a
2353 	 * subtractive bridge, pass the request up the tree.
2354 	 */
2355 	if (sc->flags & PCIB_SUBTRACTIVE && r == NULL)
2356 		return (bus_generic_alloc_resource(dev, child, type, rid,
2357 		    start, end, count, flags));
2358 	return (r);
2359 }
2360 
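/*
 * Adjust a resource sub-allocated from one of our windows or from the
 * secondary bus range, growing the window or bus range first if the new
 * range falls outside it.
 */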
2361 int
2362 pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
2363     rman_res_t start, rman_res_t end)
2364 {
2365 	struct pcib_softc *sc;
2366 	struct pcib_window *w;
2367 	rman_res_t wmask;
2368 	int error;
2369 
2370 	sc = device_get_softc(bus);
2371 
2372 	/*
2373 	 * If the resource wasn't sub-allocated from one of our region
2374 	 * managers then just pass the request up.
2375 	 */
2376 	if (!pcib_is_resource_managed(sc, type, r))
2377 		return (bus_generic_adjust_resource(bus, child, type, r,
2378 		    start, end));
2379 
2380 #ifdef PCI_RES_BUS
2381 	if (type == PCI_RES_BUS) {
2382 		/*
2383 		 * If our bus range isn't big enough to grow the sub-allocation
2384 		 * then we need to grow our bus range. Any request that would
2385 		 * require us to decrease the start of our own bus range is
2386 		 * invalid; we can only extend the end.  Ignore such requests
2387 		 * and let rman_adjust_resource fail below.
2388 		 */
2389 		if (start >= sc->bus.sec && end > sc->bus.sub) {
2390 			error = pcib_grow_subbus(&sc->bus, end);
2391 			if (error != 0)
2392 				return (error);
2393 		}
2394 	} else
2395 #endif
2396 	{
2397 		/*
2398 		 * The resource is managed and is not a secondary bus
2399 		 * number, so it must be from one of our windows.
2400 		 */
2401 		w = pcib_get_resource_window(sc, type, r);
2402 		KASSERT(w != NULL,
2403 		    ("%s: no window for resource (%#jx-%#jx) type %d",
2404 		    __func__, rman_get_start(r), rman_get_end(r), type));
2405 
2406 		/*
2407 		 * If our window isn't big enough to grow the sub-allocation
2408 		 * then we need to expand the window.
2409 		 */
2410 		if (start < w->base || end > w->limit) {
2411 			wmask = ((rman_res_t)1 << w->step) - 1;
2412 			error = pcib_expand_window(sc, w, type,
2413 			    MIN(start & ~wmask, w->base),
2414 			    MAX(end | wmask, w->limit));
2415 			if (error != 0)
2416 				return (error);
2417 			if (bootverbose)
2418 				device_printf(sc->dev,
2419 				    "grew %s window to %#jx-%#jx\n",
2420 				    w->name, (uintmax_t)w->base,
2421 				    (uintmax_t)w->limit);
2422 			pcib_write_windows(sc, w->mask);
2423 		}
2424 	}
2425 
2426 	return (rman_adjust_resource(r, start, end));
2427 }
2428 
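/*
 * Release a sub-allocated resource back to its window's resource manager,
 * deactivating it first if necessary; other resources are passed up.
 */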
2429 int
2430 pcib_release_resource(device_t dev, device_t child, int type, int rid,
2431     struct resource *r)
2432 {
2433 	struct pcib_softc *sc;
2434 	int error;
2435 
2436 	sc = device_get_softc(dev);
2437 	if (pcib_is_resource_managed(sc, type, r)) {
2438 		if (rman_get_flags(r) & RF_ACTIVE) {
2439 			error = bus_deactivate_resource(child, type, rid, r);
2440 			if (error)
2441 				return (error);
2442 		}
2443 		return (rman_release_resource(r));
2444 	}
2445 	return (bus_generic_release_resource(dev, child, type, rid, r));
2446 }
2447 #else
2448 /*
2449  * We have to trap resource allocation requests and ensure that the bridge
2450  * is set up to handle them, or is capable of doing so.
2451  */
2452 struct resource *
2453 pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
2454     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2455 {
2456 	struct pcib_softc	*sc = device_get_softc(dev);
2457 	const char *name, *suffix;
2458 	int ok;
2459 
2460 	/*
2461 	 * Fail the allocation for this range if it's not supported.
2462 	 */
2463 	name = device_get_nameunit(child);
2464 	if (name == NULL) {
2465 		name = "";
2466 		suffix = "";
2467 	} else
2468 		suffix = " ";
2469 	switch (type) {
2470 	case SYS_RES_IOPORT:
2471 		ok = 0;
2472 		if (!pcib_is_io_open(sc))
2473 			break;
2474 		ok = (start >= sc->iobase && end <= sc->iolimit);
2475 
2476 		/*
2477 		 * Make sure we allow access to VGA I/O addresses when the
2478 		 * bridge has the "VGA Enable" bit set.
2479 		 */
2480 		if (!ok && pci_is_vga_ioport_range(start, end))
2481 			ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
2482 
2483 		if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
2484 			if (!ok) {
2485 				if (start < sc->iobase)
2486 					start = sc->iobase;
2487 				if (end > sc->iolimit)
2488 					end = sc->iolimit;
2489 				if (start < end)
2490 					ok = 1;
2491 			}
2492 		} else {
2493 			ok = 1;
2494 #if 0
2495 			/*
2496 			 * If we overlap with the subtractive range, then
2497 			 * pick the upper range to use.
2498 			 */
2499 			if (start < sc->iolimit && end > sc->iobase)
2500 				start = sc->iolimit + 1;
2501 #endif
2502 		}
2503 		if (end < start) {
2504 			device_printf(dev, "ioport: end (%jx) < start (%jx)\n",
2505 			    end, start);
2506 			start = 0;
2507 			end = 0;
2508 			ok = 0;
2509 		}
2510 		if (!ok) {
2511 			device_printf(dev, "%s%srequested unsupported I/O "
2512 			    "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n",
2513 			    name, suffix, start, end, sc->iobase, sc->iolimit);
2514 			return (NULL);
2515 		}
2516 		if (bootverbose)
2517 			device_printf(dev,
2518 			    "%s%srequested I/O range 0x%jx-0x%jx: in range\n",
2519 			    name, suffix, start, end);
2520 		break;
2521 
2522 	case SYS_RES_MEMORY:
2523 		ok = 0;
2524 		if (pcib_is_nonprefetch_open(sc))
2525 			ok = ok || (start >= sc->membase && end <= sc->memlimit);
2526 		if (pcib_is_prefetch_open(sc))
2527 			ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit);
2528 
2529 		/*
2530 		 * Make sure we allow access to VGA memory addresses when the
2531 		 * bridge has the "VGA Enable" bit set.
2532 		 */
2533 		if (!ok && pci_is_vga_memory_range(start, end))
2534 			ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
2535 
2536 		if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
2537 			if (!ok) {
2538 				ok = 1;
2539 				if (flags & RF_PREFETCHABLE) {
2540 					if (pcib_is_prefetch_open(sc)) {
2541 						if (start < sc->pmembase)
2542 							start = sc->pmembase;
2543 						if (end > sc->pmemlimit)
2544 							end = sc->pmemlimit;
2545 					} else {
2546 						ok = 0;
2547 					}
2548 				} else {	/* non-prefetchable */
2549 					if (pcib_is_nonprefetch_open(sc)) {
2550 						if (start < sc->membase)
2551 							start = sc->membase;
2552 						if (end > sc->memlimit)
2553 							end = sc->memlimit;
2554 					} else {
2555 						ok = 0;
2556 					}
2557 				}
2558 			}
2559 		} else if (!ok) {
2560 			ok = 1;	/* subtractive bridge: always ok */
2561 #if 0
2562 			if (pcib_is_nonprefetch_open(sc)) {
2563 				if (start < sc->memlimit && end > sc->membase)
2564 					start = sc->memlimit + 1;
2565 			}
2566 			if (pcib_is_prefetch_open(sc)) {
2567 				if (start < sc->pmemlimit && end > sc->pmembase)
2568 					start = sc->pmemlimit + 1;
2569 			}
2570 #endif
2571 		}
2572 		if (end < start) {
2573 			device_printf(dev, "memory: end (%jx) < start (%jx)\n",
2574 			    end, start);
2575 			start = 0;
2576 			end = 0;
2577 			ok = 0;
2578 		}
2579 		if (!ok && bootverbose)
2580 			device_printf(dev,
2581 			    "%s%srequested unsupported memory range %#jx-%#jx "
2582 			    "(decoding %#jx-%#jx, %#jx-%#jx)\n",
2583 			    name, suffix, start, end,
2584 			    (uintmax_t)sc->membase, (uintmax_t)sc->memlimit,
2585 			    (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
2586 		if (!ok)
2587 			return (NULL);
2588 		if (bootverbose)
2589 			device_printf(dev,"%s%srequested memory range "
2590 			    "0x%jx-0x%jx: good\n",
2591 			    name, suffix, start, end);
2592 		break;
2593 
2594 	default:
2595 		break;
2596 	}
2597 	/*
2598 	 * Bridge is OK decoding this resource, so pass it up.
2599 	 */
2600 	return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
2601 	    count, flags));
2602 }
2603 #endif
2604 
2605 /*
2606  * If ARI is enabled on this downstream port, translate the function number
2607  * to the non-ARI slot/function.  The downstream port will convert it back in
2608  * hardware.  If ARI is not enabled, slot and func are not modified.
2609  */
2610 static __inline void
2611 pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func)
2612 {
2613 	struct pcib_softc *sc;
2614 	int ari_func;
2615 
2616 	sc = device_get_softc(pcib);
2617 	ari_func = *func;
2618 
2619 	if (sc->flags & PCIB_ENABLE_ARI) {
2620 		KASSERT(*slot == 0,
2621 		    ("Non-zero slot number with ARI enabled!"));
2622 		*slot = PCIE_ARI_SLOT(ari_func);
2623 		*func = PCIE_ARI_FUNC(ari_func);
2624 	}
2625 }
2626 
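/* Enable ARI forwarding in Device Control 2 and record it in the softc. */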
2627 static void
2628 pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos)
2629 {
2630 	uint32_t ctl2;
2631 
2632 	ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4);
2633 	ctl2 |= PCIEM_CTL2_ARI;
2634 	pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4);
2635 
2636 	sc->flags |= PCIB_ENABLE_ARI;
2637 }
2638 
2639 /*
2640  * PCIB interface.
2641  */
2642 int
2643 pcib_maxslots(device_t dev)
2644 {
2645 #if !defined(__amd64__) && !defined(__i386__)
2646 	uint32_t pcie_pos;
2647 	uint16_t val;
2648 
2649 	/*
2650 	 * If this is a PCIe root port or downstream switch port, there's only
2651 	 * one slot permitted.
2652 	 */
2653 	if (pci_find_cap(dev, PCIY_EXPRESS, &pcie_pos) == 0) {
2654 		val = pci_read_config(dev, pcie_pos + PCIER_FLAGS, 2);
2655 		val &= PCIEM_FLAGS_TYPE;
2656 		if (val == PCIEM_TYPE_ROOT_PORT ||
2657 		    val == PCIEM_TYPE_DOWNSTREAM_PORT)
2658 			return (0);
2659 	}
2660 #endif
2661 	return (PCI_SLOTMAX);
2662 }
2663 
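/* With ARI enabled only slot 0 exists; otherwise use the normal slot limit. */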
2664 static int
2665 pcib_ari_maxslots(device_t dev)
2666 {
2667 	struct pcib_softc *sc;
2668 
2669 	sc = device_get_softc(dev);
2670 
2671 	if (sc->flags & PCIB_ENABLE_ARI)
2672 		return (PCIE_ARI_SLOTMAX);
2673 	else
2674 		return (pcib_maxslots(dev));
2675 }
2676 
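/* ARI widens the function number to 8 bits, allowing up to 256 functions. */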
2677 static int
2678 pcib_ari_maxfuncs(device_t dev)
2679 {
2680 	struct pcib_softc *sc;
2681 
2682 	sc = device_get_softc(dev);
2683 
2684 	if (sc->flags & PCIB_ENABLE_ARI)
2685 		return (PCIE_ARI_FUNCMAX);
2686 	else
2687 		return (PCI_FUNCMAX);
2688 }
2689 
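/*
 * Decode a routing ID into bus/slot/function, using the 8-bit ARI function
 * field when ARI forwarding is enabled.
 */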
2690 static void
2691 pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot,
2692     int *func)
2693 {
2694 	struct pcib_softc *sc;
2695 
2696 	sc = device_get_softc(pcib);
2697 
2698 	*bus = PCI_RID2BUS(rid);
2699 	if (sc->flags & PCIB_ENABLE_ARI) {
2700 		*slot = PCIE_ARI_RID2SLOT(rid);
2701 		*func = PCIE_ARI_RID2FUNC(rid);
2702 	} else {
2703 		*slot = PCI_RID2SLOT(rid);
2704 		*func = PCI_RID2FUNC(rid);
2705 	}
2706 }
2707 
2708 /*
2709  * Since we are a child of a PCI bus, its parent must support the pcib interface.
2710  */
2711 static uint32_t
2712 pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
2713 {
2714 #ifdef PCI_HP
2715 	struct pcib_softc *sc;
2716 
2717 	sc = device_get_softc(dev);
2718 	if (!pcib_present(sc)) {
2719 		switch (width) {
2720 		case 2:
2721 			return (0xffff);
2722 		case 1:
2723 			return (0xff);
2724 		default:
2725 			return (0xffffffff);
2726 		}
2727 	}
2728 #endif
2729 	pcib_xlate_ari(dev, b, &s, &f);
2730 	return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s,
2731 	    f, reg, width));
2732 }
2733 
2734 static void
2735 pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width)
2736 {
2737 #ifdef PCI_HP
2738 	struct pcib_softc *sc;
2739 
2740 	sc = device_get_softc(dev);
2741 	if (!pcib_present(sc))
2742 		return;
2743 #endif
2744 	pcib_xlate_ari(dev, b, &s, &f);
2745 	PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f,
2746 	    reg, val, width);
2747 }
2748 
2749 /*
2750  * Route an interrupt across a PCI bridge.
2751  */
2752 int
2753 pcib_route_interrupt(device_t pcib, device_t dev, int pin)
2754 {
2755     device_t	bus;
2756     int		parent_intpin;
2757     int		intnum;
2758 
2759     /*
2760      *
2761      * The PCI standard defines a swizzle of the child-side device/intpin to
2762      * the parent-side intpin as follows.
2763      *
2764      * device = device on child bus
2765      * child_intpin = intpin on child bus slot (0-3)
2766      * parent_intpin = intpin on parent bus slot (0-3)
2767      *
2768      * parent_intpin = (device + child_intpin) % 4
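     * For example, a device in slot 3 asserting INTB (child_intpin 1) is
     * routed to parent_intpin 0 (INTA): (3 + 1) % 4 == 0.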
2769      */
2770     parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4;
2771 
2772     /*
2773      * Our parent is a PCI bus.  Its parent must export the pcib interface
2774      * which includes the ability to route interrupts.
2775      */
2776     bus = device_get_parent(pcib);
2777     intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1);
2778     if (PCI_INTERRUPT_VALID(intnum) && bootverbose) {
2779 	device_printf(pcib, "slot %d INT%c is routed to irq %d\n",
2780 	    pci_get_slot(dev), 'A' + pin - 1, intnum);
2781     }
2782     return(intnum);
2783 }
2784 
2785 /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */
2786 int
2787 pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs)
2788 {
2789 	struct pcib_softc *sc = device_get_softc(pcib);
2790 	device_t bus;
2791 
2792 	if (sc->flags & PCIB_DISABLE_MSI)
2793 		return (ENXIO);
2794 	bus = device_get_parent(pcib);
2795 	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
2796 	    irqs));
2797 }
2798 
2799 /* Pass request to release MSI/MSI-X messages up to the parent bridge. */
2800 int
2801 pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
2802 {
2803 	device_t bus;
2804 
2805 	bus = device_get_parent(pcib);
2806 	return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
2807 }
2808 
2809 /* Pass request to alloc an MSI-X message up to the parent bridge. */
2810 int
2811 pcib_alloc_msix(device_t pcib, device_t dev, int *irq)
2812 {
2813 	struct pcib_softc *sc = device_get_softc(pcib);
2814 	device_t bus;
2815 
2816 	if (sc->flags & PCIB_DISABLE_MSIX)
2817 		return (ENXIO);
2818 	bus = device_get_parent(pcib);
2819 	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
2820 }
2821 
2822 /* Pass request to release an MSI-X message up to the parent bridge. */
2823 int
2824 pcib_release_msix(device_t pcib, device_t dev, int irq)
2825 {
2826 	device_t bus;
2827 
2828 	bus = device_get_parent(pcib);
2829 	return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
2830 }
2831 
2832 /* Pass request to map MSI/MSI-X message up to parent bridge. */
2833 int
2834 pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr,
2835     uint32_t *data)
2836 {
2837 	device_t bus;
2838 	int error;
2839 
2840 	bus = device_get_parent(pcib);
2841 	error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data);
2842 	if (error)
2843 		return (error);
2844 
2845 	pci_ht_map_msi(pcib, *addr);
2846 	return (0);
2847 }
2848 
2849 /* Pass request for device power state up to parent bridge. */
2850 int
2851 pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate)
2852 {
2853 	device_t bus;
2854 
2855 	bus = device_get_parent(pcib);
2856 	return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate));
2857 }
2858 
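/* Report whether ARI forwarding has been enabled on this downstream port. */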
2859 static int
2860 pcib_ari_enabled(device_t pcib)
2861 {
2862 	struct pcib_softc *sc;
2863 
2864 	sc = device_get_softc(pcib);
2865 
2866 	return ((sc->flags & PCIB_ENABLE_ARI) != 0);
2867 }
2868 
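/*
 * Return the routing ID for a child device, using the 8-bit ARI function
 * number when ARI is enabled; other ID types are passed up to the parent.
 */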
2869 static int
2870 pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type,
2871     uintptr_t *id)
2872 {
2873 	struct pcib_softc *sc;
2874 	device_t bus_dev;
2875 	uint8_t bus, slot, func;
2876 
2877 	if (type != PCI_ID_RID) {
2878 		bus_dev = device_get_parent(pcib);
2879 		return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id));
2880 	}
2881 
2882 	sc = device_get_softc(pcib);
2883 
2884 	if (sc->flags & PCIB_ENABLE_ARI) {
2885 		bus = pci_get_bus(dev);
2886 		func = pci_get_function(dev);
2887 
2888 		*id = (PCI_ARI_RID(bus, func));
2889 	} else {
2890 		bus = pci_get_bus(dev);
2891 		slot = pci_get_slot(dev);
2892 		func = pci_get_function(dev);
2893 
2894 		*id = (PCI_RID(bus, slot, func));
2895 	}
2896 
2897 	return (0);
2898 }
2899 
2900 /*
2901  * Check that the downstream port (pcib) and the endpoint device (dev) both
2902  * support ARI.  If so, enable it and return 0; otherwise, return an error.
2903  */
2904 static int
2905 pcib_try_enable_ari(device_t pcib, device_t dev)
2906 {
2907 	struct pcib_softc *sc;
2908 	int error;
2909 	uint32_t cap2;
2910 	int ari_cap_off;
2911 	uint32_t ari_ver;
2912 	uint32_t pcie_pos;
2913 
2914 	sc = device_get_softc(pcib);
2915 
2916 	/*
2917 	 * ARI is controlled in a register in the PCIe capability structure.
2918 	 * If the downstream port does not have the PCIe capability structure
2919 	 * then it does not support ARI.
2920 	 */
2921 	error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos);
2922 	if (error != 0)
2923 		return (ENODEV);
2924 
2925 	/* Check that the PCIe port advertises ARI support. */
2926 	cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4);
2927 	if (!(cap2 & PCIEM_CAP2_ARI))
2928 		return (ENODEV);
2929 
2930 	/*
2931 	 * Check that the endpoint device advertises ARI support via the ARI
2932 	 * extended capability structure.
2933 	 */
2934 	error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off);
2935 	if (error != 0)
2936 		return (ENODEV);
2937 
2938 	/*
2939 	 * Finally, check that the endpoint device supports the same version
2940 	 * of ARI that we do.
2941 	 */
2942 	ari_ver = pci_read_config(dev, ari_cap_off, 4);
2943 	if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) {
2944 		if (bootverbose)
2945 			device_printf(pcib,
2946 			    "Unsupported version of ARI (%d) detected\n",
2947 			    PCI_EXTCAP_VER(ari_ver));
2948 
2949 		return (ENXIO);
2950 	}
2951 
2952 	pcib_enable_ari(sc, pcie_pos);
2953 
2954 	return (0);
2955 }
2956 
2957 int
2958 pcib_request_feature_allow(device_t pcib, device_t dev,
2959     enum pci_feature feature)
2960 {
2961 	/*
2962 	 * There is no host firmware to negotiate with, so we allow
2963 	 * every valid feature requested.
2964 	 */
2965 	switch (feature) {
2966 	case PCI_FEATURE_AER:
2967 	case PCI_FEATURE_HP:
2968 		break;
2969 	default:
2970 		return (EINVAL);
2971 	}
2972 
2973 	return (0);
2974 }
2975 
2976 int
2977 pcib_request_feature(device_t dev, enum pci_feature feature)
2978 {
2979 
2980 	/*
2981 	 * Invoke PCIB_REQUEST_FEATURE of this bridge first in case
2982 	 * the firmware overrides the method for PCI-PCI bridges.
2983 	 */
2984 	return (PCIB_REQUEST_FEATURE(dev, dev, feature));
2985 }
2986 
2987 /*
2988  * Pass the request to use this PCI feature up the tree.  Either there is
2989  * firmware such as ACPI that is using this feature and will approve (or
2990  * deny) the request to take it over, or the platform has no such firmware,
2991  * in which case the request will be approved.  If the request is approved,
2992  * the OS is expected to make use of the feature or render it harmless.
2993  */
2994 static int
2995 pcib_request_feature_default(device_t pcib, device_t dev,
2996     enum pci_feature feature)
2997 {
2998 	device_t bus;
2999 
3000 	/*
3001 	 * Our parent is necessarily a pci bus. Its parent will either be
3002 	 * another pci bridge (which passes it up) or a host bridge that can
3003 	 * approve or reject the request.
3004 	 */
3005 	bus = device_get_parent(pcib);
3006 	return (PCIB_REQUEST_FEATURE(device_get_parent(bus), dev, feature));
3007 }
3008 
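/*
 * Reset the child PCI bus via a PCIe link reset when this bridge is a
 * root port or downstream switch port.
 */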
3009 static int
3010 pcib_reset_child(device_t dev, device_t child, int flags)
3011 {
3012 	struct pci_devinfo *pdinfo;
3013 	int error;
3014 
3015 	error = 0;
3016 	if (dev == NULL || device_get_parent(child) != dev)
3017 		goto out;
3018 	error = ENXIO;
3019 	if (device_get_devclass(child) != devclass_find("pci"))
3020 		goto out;
3021 	pdinfo = device_get_ivars(dev);
3022 	if (pdinfo->cfg.pcie.pcie_location != 0 &&
3023 	    (pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT ||
3024 	    pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)) {
3025 		error = bus_helper_reset_prepare(child, flags);
3026 		if (error == 0) {
3027 			error = pcie_link_reset(dev,
3028 			    pdinfo->cfg.pcie.pcie_location);
3029 			/* XXXKIB call _post even if error != 0 ? */
3030 			bus_helper_reset_post(child, flags);
3031 		}
3032 	}
3033 out:
3034 	return (error);
3035 }
3036