1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier
5  * Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
6  * Copyright (c) 2000 BSDi
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 /*
37  * PCI:PCI bridge support.
38  */
39 
40 #include "opt_pci.h"
41 
42 #include <sys/param.h>
43 #include <sys/bus.h>
44 #include <sys/kernel.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/rman.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50 #include <sys/taskqueue.h>
51 
52 #include <dev/pci/pcivar.h>
53 #include <dev/pci/pcireg.h>
54 #include <dev/pci/pci_private.h>
55 #include <dev/pci/pcib_private.h>
56 
57 #include "pcib_if.h"
58 
59 static int		pcib_probe(device_t dev);
60 static int		pcib_suspend(device_t dev);
61 static int		pcib_resume(device_t dev);
62 static int		pcib_power_for_sleep(device_t pcib, device_t dev,
63 			    int *pstate);
64 static int		pcib_ari_get_id(device_t pcib, device_t dev,
65     enum pci_id_type type, uintptr_t *id);
66 static uint32_t		pcib_read_config(device_t dev, u_int b, u_int s,
67     u_int f, u_int reg, int width);
68 static void		pcib_write_config(device_t dev, u_int b, u_int s,
69     u_int f, u_int reg, uint32_t val, int width);
70 static int		pcib_ari_maxslots(device_t dev);
71 static int		pcib_ari_maxfuncs(device_t dev);
72 static int		pcib_try_enable_ari(device_t pcib, device_t dev);
73 static int		pcib_ari_enabled(device_t pcib);
74 static void		pcib_ari_decode_rid(device_t pcib, uint16_t rid,
75 			    int *bus, int *slot, int *func);
76 #ifdef PCI_HP
77 static void		pcib_pcie_ab_timeout(void *arg);
78 static void		pcib_pcie_cc_timeout(void *arg);
79 static void		pcib_pcie_dll_timeout(void *arg);
80 #endif
81 static int		pcib_request_feature_default(device_t pcib, device_t dev,
82 			    enum pci_feature feature);
83 
84 static device_method_t pcib_methods[] = {
85     /* Device interface */
86     DEVMETHOD(device_probe,		pcib_probe),
87     DEVMETHOD(device_attach,		pcib_attach),
88     DEVMETHOD(device_detach,		pcib_detach),
89     DEVMETHOD(device_shutdown,		bus_generic_shutdown),
90     DEVMETHOD(device_suspend,		pcib_suspend),
91     DEVMETHOD(device_resume,		pcib_resume),
92 
93     /* Bus interface */
94     DEVMETHOD(bus_child_present,	pcib_child_present),
95     DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
96     DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
97     DEVMETHOD(bus_alloc_resource,	pcib_alloc_resource),
98 #ifdef NEW_PCIB
99     DEVMETHOD(bus_adjust_resource,	pcib_adjust_resource),
100     DEVMETHOD(bus_release_resource,	pcib_release_resource),
101 #else
102     DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
103     DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
104 #endif
105     DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
106     DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
107     DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
108     DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
109 
110     /* pcib interface */
111     DEVMETHOD(pcib_maxslots,		pcib_ari_maxslots),
112     DEVMETHOD(pcib_maxfuncs,		pcib_ari_maxfuncs),
113     DEVMETHOD(pcib_read_config,		pcib_read_config),
114     DEVMETHOD(pcib_write_config,	pcib_write_config),
115     DEVMETHOD(pcib_route_interrupt,	pcib_route_interrupt),
116     DEVMETHOD(pcib_alloc_msi,		pcib_alloc_msi),
117     DEVMETHOD(pcib_release_msi,		pcib_release_msi),
118     DEVMETHOD(pcib_alloc_msix,		pcib_alloc_msix),
119     DEVMETHOD(pcib_release_msix,	pcib_release_msix),
120     DEVMETHOD(pcib_map_msi,		pcib_map_msi),
121     DEVMETHOD(pcib_power_for_sleep,	pcib_power_for_sleep),
122     DEVMETHOD(pcib_get_id,		pcib_ari_get_id),
123     DEVMETHOD(pcib_try_enable_ari,	pcib_try_enable_ari),
124     DEVMETHOD(pcib_ari_enabled,		pcib_ari_enabled),
125     DEVMETHOD(pcib_decode_rid,		pcib_ari_decode_rid),
126     DEVMETHOD(pcib_request_feature,	pcib_request_feature_default),
127 
128     DEVMETHOD_END
129 };
130 
131 static devclass_t pcib_devclass;
132 
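/*
 * Register the generic PCI-PCI bridge driver with newbus: the method
 * table above becomes the "pcib" driver, which is attached as a child
 * driver of "pci" buses so each bridge found during enumeration gets
 * an instance.
 */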
133 DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc));
134 DRIVER_MODULE(pcib, pci, pcib_driver, pcib_devclass, NULL, NULL);
135 
136 #if defined(NEW_PCIB) || defined(PCI_HP)
137 SYSCTL_DECL(_hw_pci);
138 #endif
139 
140 #ifdef NEW_PCIB
141 static int pci_clear_pcib;
142 SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0,
143     "Clear firmware-assigned resources for PCI-PCI bridge I/O windows.");
144 
145 /*
146  * Is a resource from a child device sub-allocated from one of our
147  * resource managers?
148  */
149 static int
150 pcib_is_resource_managed(struct pcib_softc *sc, int type, struct resource *r)
151 {
152 
153 	switch (type) {
154 #ifdef PCI_RES_BUS
155 	case PCI_RES_BUS:
156 		return (rman_is_region_manager(r, &sc->bus.rman));
157 #endif
158 	case SYS_RES_IOPORT:
159 		return (rman_is_region_manager(r, &sc->io.rman));
160 	case SYS_RES_MEMORY:
161 		/* Prefetchable resources may live in either memory rman. */
162 		if (rman_get_flags(r) & RF_PREFETCHABLE &&
163 		    rman_is_region_manager(r, &sc->pmem.rman))
164 			return (1);
165 		return (rman_is_region_manager(r, &sc->mem.rman));
166 	}
167 	return (0);
168 }
169 
170 static int
171 pcib_is_window_open(struct pcib_window *pw)
172 {
173 
174 	return (pw->valid && pw->base < pw->limit);
175 }
176 
177 /*
178  * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and
179  * handle for the resource, we could pass RF_ACTIVE up to the PCI bus
180  * when allocating the resource windows and rely on the PCI bus driver
181  * to do this for us.
182  */
183 static void
184 pcib_activate_window(struct pcib_softc *sc, int type)
185 {
186 
187 	PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type);
188 }
189 
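/*
 * Program the bridge's base/limit window registers from the values
 * cached in the softc.  Only the high-order address bits are stored in
 * the registers; with illustrative values, an I/O window of
 * 0x2000-0x2fff is written as base 0x20 and limit 0x2f (bits 15:12,
 * 4KB granularity), and a memory window of 0xc0000000-0xc00fffff as
 * base 0xc000 and limit 0xc00f (bits 31:20, 1MB granularity).
 */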
190 static void
191 pcib_write_windows(struct pcib_softc *sc, int mask)
192 {
193 	device_t dev;
194 	uint32_t val;
195 
196 	dev = sc->dev;
197 	if (sc->io.valid && mask & WIN_IO) {
198 		val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
199 		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
200 			pci_write_config(dev, PCIR_IOBASEH_1,
201 			    sc->io.base >> 16, 2);
202 			pci_write_config(dev, PCIR_IOLIMITH_1,
203 			    sc->io.limit >> 16, 2);
204 		}
205 		pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1);
206 		pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1);
207 	}
208 
209 	if (mask & WIN_MEM) {
210 		pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2);
211 		pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2);
212 	}
213 
214 	if (sc->pmem.valid && mask & WIN_PMEM) {
215 		val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
216 		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
217 			pci_write_config(dev, PCIR_PMBASEH_1,
218 			    sc->pmem.base >> 32, 4);
219 			pci_write_config(dev, PCIR_PMLIMITH_1,
220 			    sc->pmem.limit >> 32, 4);
221 		}
222 		pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2);
223 		pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2);
224 	}
225 }
226 
227 /*
228  * This is used to reject I/O port allocations that conflict with an
229  * ISA alias range.
230  */
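/*
 * With PCIB_BCR_ISA_ENABLE set, the bridge forwards only the bottom
 * 0x100 bytes of each 0x400-byte block below 64KB; offsets 0x100-0x3ff
 * of each block are ISA aliases and are not forwarded.  As an
 * illustration, a fixed request for 0x3e0-0x3e7 falls entirely within
 * an alias region and is rejected, while 0x1000-0x10ff is not.  The
 * range 0x000-0x0ff is also treated as an alias here since it is
 * reserved for system devices.
 */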
231 static int
232 pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end,
233     rman_res_t count)
234 {
235 	rman_res_t next_alias;
236 
237 	if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE))
238 		return (0);
239 
240 	/* Only check fixed ranges for overlap. */
241 	if (start + count - 1 != end)
242 		return (0);
243 
244 	/* ISA aliases are only in the lower 64KB of I/O space. */
245 	if (start >= 65536)
246 		return (0);
247 
248 	/* Check for overlap with 0x000 - 0x0ff as a special case. */
249 	if (start < 0x100)
250 		goto alias;
251 
252 	/*
253 	 * If the start address is an alias, the range is an alias.
254 	 * Otherwise, compute the start of the next alias range and
255 	 * check if it is before the end of the candidate range.
256 	 */
257 	if ((start & 0x300) != 0)
258 		goto alias;
259 	next_alias = (start & ~0x3fful) | 0x100;
260 	if (next_alias <= end)
261 		goto alias;
262 	return (0);
263 
264 alias:
265 	if (bootverbose)
266 		device_printf(sc->dev,
267 		    "I/O range %#jx-%#jx overlaps with an ISA alias\n", start,
268 		    end);
269 	return (1);
270 }
271 
272 static void
273 pcib_add_window_resources(struct pcib_window *w, struct resource **res,
274     int count)
275 {
276 	struct resource **newarray;
277 	int error, i;
278 
279 	newarray = malloc(sizeof(struct resource *) * (w->count + count),
280 	    M_DEVBUF, M_WAITOK);
281 	if (w->res != NULL)
282 		bcopy(w->res, newarray, sizeof(struct resource *) * w->count);
283 	bcopy(res, newarray + w->count, sizeof(struct resource *) * count);
284 	free(w->res, M_DEVBUF);
285 	w->res = newarray;
286 	w->count += count;
287 
288 	for (i = 0; i < count; i++) {
289 		error = rman_manage_region(&w->rman, rman_get_start(res[i]),
290 		    rman_get_end(res[i]));
291 		if (error)
292 			panic("Failed to add resource to rman");
293 	}
294 }
295 
296 typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg);
297 
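/*
 * Invoke the callback for each non-alias piece of the given range.
 * For instance, walking 0x1000-0x17ff invokes the callback twice, for
 * 0x1000-0x10ff and 0x1400-0x14ff; a range extending past 64KB gets
 * one final callback for the portion above 0xffff, where no aliasing
 * applies.
 */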
298 static void
299 pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb,
300     void *arg)
301 {
302 	rman_res_t next_end;
303 
304 	/*
305 	 * If start is within an ISA alias range, move up to the start
306 	 * of the next non-alias range.  As a special case, addresses
307 	 * in the range 0x000 - 0x0ff should also be skipped since
308 	 * those are used for various system I/O devices in ISA
309 	 * systems.
310 	 */
311 	if (start <= 65535) {
312 		if (start < 0x100 || (start & 0x300) != 0) {
313 			start &= ~0x3ff;
314 			start += 0x400;
315 		}
316 	}
317 
318 	/* ISA aliases are only in the lower 64KB of I/O space. */
319 	while (start <= MIN(end, 65535)) {
320 		next_end = MIN(start | 0xff, end);
321 		cb(start, next_end, arg);
322 		start += 0x400;
323 	}
324 
325 	if (start <= end)
326 		cb(start, end, arg);
327 }
328 
329 static void
330 count_ranges(rman_res_t start, rman_res_t end, void *arg)
331 {
332 	int *countp;
333 
334 	countp = arg;
335 	(*countp)++;
336 }
337 
338 struct alloc_state {
339 	struct resource **res;
340 	struct pcib_softc *sc;
341 	int count, error;
342 };
343 
344 static void
345 alloc_ranges(rman_res_t start, rman_res_t end, void *arg)
346 {
347 	struct alloc_state *as;
348 	struct pcib_window *w;
349 	int rid;
350 
351 	as = arg;
352 	if (as->error != 0)
353 		return;
354 
355 	w = &as->sc->io;
356 	rid = w->reg;
357 	if (bootverbose)
358 		device_printf(as->sc->dev,
359 		    "allocating non-ISA range %#jx-%#jx\n", start, end);
360 	as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT,
361 	    &rid, start, end, end - start + 1, 0);
362 	if (as->res[as->count] == NULL)
363 		as->error = ENXIO;
364 	else
365 		as->count++;
366 }
367 
368 static int
369 pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end)
370 {
371 	struct alloc_state as;
372 	int i, new_count;
373 
374 	/* First, see how many ranges we need. */
375 	new_count = 0;
376 	pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count);
377 
378 	/* Second, allocate the ranges. */
379 	as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF,
380 	    M_WAITOK);
381 	as.sc = sc;
382 	as.count = 0;
383 	as.error = 0;
384 	pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as);
385 	if (as.error != 0) {
386 		for (i = 0; i < as.count; i++)
387 			bus_release_resource(sc->dev, SYS_RES_IOPORT,
388 			    sc->io.reg, as.res[i]);
389 		free(as.res, M_DEVBUF);
390 		return (as.error);
391 	}
392 	KASSERT(as.count == new_count, ("%s: count mismatch", __func__));
393 
394 	/* Third, add the ranges to the window. */
395 	pcib_add_window_resources(&sc->io, as.res, as.count);
396 	free(as.res, M_DEVBUF);
397 	return (0);
398 }
399 
400 static void
401 pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type,
402     int flags, pci_addr_t max_address)
403 {
404 	struct resource *res;
405 	char buf[64];
406 	int error, rid;
407 
408 	if (max_address != (rman_res_t)max_address)
409 		max_address = ~0;
410 	w->rman.rm_start = 0;
411 	w->rman.rm_end = max_address;
412 	w->rman.rm_type = RMAN_ARRAY;
413 	snprintf(buf, sizeof(buf), "%s %s window",
414 	    device_get_nameunit(sc->dev), w->name);
415 	w->rman.rm_descr = strdup(buf, M_DEVBUF);
416 	error = rman_init(&w->rman);
417 	if (error)
418 		panic("Failed to initialize %s %s rman",
419 		    device_get_nameunit(sc->dev), w->name);
420 
421 	if (!pcib_is_window_open(w))
422 		return;
423 
424 	if (w->base > max_address || w->limit > max_address) {
425 		device_printf(sc->dev,
426 		    "initial %s window has too many bits, ignoring\n", w->name);
427 		return;
428 	}
429 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE)
430 		(void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit);
431 	else {
432 		rid = w->reg;
433 		res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit,
434 		    w->limit - w->base + 1, flags);
435 		if (res != NULL)
436 			pcib_add_window_resources(w, &res, 1);
437 	}
438 	if (w->res == NULL) {
439 		device_printf(sc->dev,
440 		    "failed to allocate initial %s window: %#jx-%#jx\n",
441 		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
442 		w->base = max_address;
443 		w->limit = 0;
444 		pcib_write_windows(sc, w->mask);
445 		return;
446 	}
447 	pcib_activate_window(sc, type);
448 }
449 
450 /*
451  * Initialize I/O windows.
452  */
453 static void
454 pcib_probe_windows(struct pcib_softc *sc)
455 {
456 	pci_addr_t max;
457 	device_t dev;
458 	uint32_t val;
459 
460 	dev = sc->dev;
461 
462 	if (pci_clear_pcib) {
463 		pcib_bridge_init(dev);
464 	}
465 
466 	/* Determine if the I/O port window is implemented. */
467 	val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
468 	if (val == 0) {
469 		/*
470 		 * If 'val' is zero, the I/O window is either unimplemented
471 		 * or decodes only 16 bits; the write test below tells which.
472 		 */
473 		pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
474 		if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) {
475 			sc->io.valid = 1;
476 			pci_write_config(dev, PCIR_IOBASEL_1, 0, 1);
477 		}
478 	} else
479 		sc->io.valid = 1;
480 
481 	/* Read the existing I/O port window. */
482 	if (sc->io.valid) {
483 		sc->io.reg = PCIR_IOBASEL_1;
484 		sc->io.step = 12;
485 		sc->io.mask = WIN_IO;
486 		sc->io.name = "I/O port";
487 		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
488 			sc->io.base = PCI_PPBIOBASE(
489 			    pci_read_config(dev, PCIR_IOBASEH_1, 2), val);
490 			sc->io.limit = PCI_PPBIOLIMIT(
491 			    pci_read_config(dev, PCIR_IOLIMITH_1, 2),
492 			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
493 			max = 0xffffffff;
494 		} else {
495 			sc->io.base = PCI_PPBIOBASE(0, val);
496 			sc->io.limit = PCI_PPBIOLIMIT(0,
497 			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
498 			max = 0xffff;
499 		}
500 		pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max);
501 	}
502 
503 	/* Read the existing memory window. */
504 	sc->mem.valid = 1;
505 	sc->mem.reg = PCIR_MEMBASE_1;
506 	sc->mem.step = 20;
507 	sc->mem.mask = WIN_MEM;
508 	sc->mem.name = "memory";
509 	sc->mem.base = PCI_PPBMEMBASE(0,
510 	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
511 	sc->mem.limit = PCI_PPBMEMLIMIT(0,
512 	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
513 	pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff);
514 
515 	/* Determine if the prefetchable memory window is implemented. */
516 	val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
517 	if (val == 0) {
518 		/*
519 		 * If 'val' is zero, the prefetchable window is either
520 		 * unimplemented or 32-bit only; the write test below tells which.
521 		 */
522 		pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
523 		if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) {
524 			sc->pmem.valid = 1;
525 			pci_write_config(dev, PCIR_PMBASEL_1, 0, 2);
526 		}
527 	} else
528 		sc->pmem.valid = 1;
529 
530 	/* Read the existing prefetchable memory window. */
531 	if (sc->pmem.valid) {
532 		sc->pmem.reg = PCIR_PMBASEL_1;
533 		sc->pmem.step = 20;
534 		sc->pmem.mask = WIN_PMEM;
535 		sc->pmem.name = "prefetch";
536 		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
537 			sc->pmem.base = PCI_PPBMEMBASE(
538 			    pci_read_config(dev, PCIR_PMBASEH_1, 4), val);
539 			sc->pmem.limit = PCI_PPBMEMLIMIT(
540 			    pci_read_config(dev, PCIR_PMLIMITH_1, 4),
541 			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
542 			max = 0xffffffffffffffff;
543 		} else {
544 			sc->pmem.base = PCI_PPBMEMBASE(0, val);
545 			sc->pmem.limit = PCI_PPBMEMLIMIT(0,
546 			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
547 			max = 0xffffffff;
548 		}
549 		pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY,
550 		    RF_PREFETCHABLE, max);
551 	}
552 }
553 
554 static void
555 pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type)
556 {
557 	device_t dev;
558 	int error, i;
559 
560 	if (!w->valid)
561 		return;
562 
563 	dev = sc->dev;
564 	error = rman_fini(&w->rman);
565 	if (error) {
566 		device_printf(dev, "failed to release %s rman\n", w->name);
567 		return;
568 	}
569 	free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF);
570 
571 	for (i = 0; i < w->count; i++) {
572 		error = bus_free_resource(dev, type, w->res[i]);
573 		if (error)
574 			device_printf(dev,
575 			    "failed to release %s resource: %d\n", w->name,
576 			    error);
577 	}
578 	free(w->res, M_DEVBUF);
579 }
580 
581 static void
582 pcib_free_windows(struct pcib_softc *sc)
583 {
584 
585 	pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY);
586 	pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY);
587 	pcib_release_window(sc, &sc->io, SYS_RES_IOPORT);
588 }
589 
590 #ifdef PCI_RES_BUS
591 /*
592  * Allocate a suitable secondary bus for this bridge if needed and
593  * initialize the resource manager for the secondary bus range.  Note
594  * that the minimum count is a desired value and this may allocate a
595  * smaller range.
596  */
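/*
 * For example (illustrative bus numbers), a bridge that the firmware
 * programmed with secondary bus 2 and subordinate bus 4 should get the
 * existing range 2-4 back from the parent.  min_count is only a hint:
 * if the returned range is smaller, an attempt is made below to grow
 * it, and if no range was assigned at all the code falls back to a
 * single bus number.
 */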
597 void
598 pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count)
599 {
600 	char buf[64];
601 	int error, rid, sec_reg;
602 
603 	switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) {
604 	case PCIM_HDRTYPE_BRIDGE:
605 		sec_reg = PCIR_SECBUS_1;
606 		bus->sub_reg = PCIR_SUBBUS_1;
607 		break;
608 	case PCIM_HDRTYPE_CARDBUS:
609 		sec_reg = PCIR_SECBUS_2;
610 		bus->sub_reg = PCIR_SUBBUS_2;
611 		break;
612 	default:
613 		panic("not a PCI bridge");
614 	}
615 	bus->sec = pci_read_config(dev, sec_reg, 1);
616 	bus->sub = pci_read_config(dev, bus->sub_reg, 1);
617 	bus->dev = dev;
618 	bus->rman.rm_start = 0;
619 	bus->rman.rm_end = PCI_BUSMAX;
620 	bus->rman.rm_type = RMAN_ARRAY;
621 	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
622 	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
623 	error = rman_init(&bus->rman);
624 	if (error)
625 		panic("Failed to initialize %s bus number rman",
626 		    device_get_nameunit(dev));
627 
628 	/*
629 	 * Allocate a bus range.  This will return an existing bus range
630 	 * if one exists, or a new bus range if one does not.
631 	 */
632 	rid = 0;
633 	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
634 	    min_count, 0);
635 	if (bus->res == NULL) {
636 		/*
637 		 * Fall back to just allocating a range of a single bus
638 		 * number.
639 		 */
640 		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
641 		    1, 0);
642 	} else if (rman_get_size(bus->res) < min_count)
643 		/*
644 		 * Attempt to grow the existing range to satisfy the
645 		 * minimum desired count.
646 		 */
647 		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
648 		    rman_get_start(bus->res), rman_get_start(bus->res) +
649 		    min_count - 1);
650 
651 	/*
652 	 * Add the initial resource to the rman.
653 	 */
654 	if (bus->res != NULL) {
655 		error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
656 		    rman_get_end(bus->res));
657 		if (error)
658 			panic("Failed to add resource to rman");
659 		bus->sec = rman_get_start(bus->res);
660 		bus->sub = rman_get_end(bus->res);
661 	}
662 }
663 
664 void
665 pcib_free_secbus(device_t dev, struct pcib_secbus *bus)
666 {
667 	int error;
668 
669 	error = rman_fini(&bus->rman);
670 	if (error) {
671 		device_printf(dev, "failed to release bus number rman\n");
672 		return;
673 	}
674 	free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF);
675 
676 	error = bus_free_resource(dev, PCI_RES_BUS, bus->res);
677 	if (error)
678 		device_printf(dev,
679 		    "failed to release bus numbers resource: %d\n", error);
680 }
681 
682 static struct resource *
683 pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid,
684     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
685 {
686 	struct resource *res;
687 
688 	res = rman_reserve_resource(&bus->rman, start, end, count, flags,
689 	    child);
690 	if (res == NULL)
691 		return (NULL);
692 
693 	if (bootverbose)
694 		device_printf(bus->dev,
695 		    "allocated bus range (%ju-%ju) for rid %d of %s\n",
696 		    rman_get_start(res), rman_get_end(res), *rid,
697 		    pcib_child_name(child));
698 	rman_set_rid(res, *rid);
699 	return (res);
700 }
701 
702 /*
703  * Attempt to grow the secondary bus range.  This is much simpler than
704  * for I/O windows as the range can only be grown by increasing
705  * subbus.
706  */
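/*
 * For instance, growing a bridge that currently decodes buses 2-2 to
 * cover buses 2-5 adjusts the end of the parent resource to 5, adds
 * bus numbers 3-5 to the local rman, and writes 5 to the subordinate
 * bus register.
 */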
707 static int
708 pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end)
709 {
710 	rman_res_t old_end;
711 	int error;
712 
713 	old_end = rman_get_end(bus->res);
714 	KASSERT(new_end > old_end, ("attempt to shrink subbus"));
715 	error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res,
716 	    rman_get_start(bus->res), new_end);
717 	if (error)
718 		return (error);
719 	if (bootverbose)
720 		device_printf(bus->dev, "grew bus range to %ju-%ju\n",
721 		    rman_get_start(bus->res), rman_get_end(bus->res));
722 	error = rman_manage_region(&bus->rman, old_end + 1,
723 	    rman_get_end(bus->res));
724 	if (error)
725 		panic("Failed to add resource to rman");
726 	bus->sub = rman_get_end(bus->res);
727 	pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1);
728 	return (0);
729 }
730 
731 struct resource *
732 pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
733     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
734 {
735 	struct resource *res;
736 	rman_res_t start_free, end_free, new_end;
737 
738 	/*
739 	 * First, see if the request can be satisfied by the existing
740 	 * bus range.
741 	 */
742 	res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags);
743 	if (res != NULL)
744 		return (res);
745 
746 	/*
747 	 * Figure out a range to grow the bus range.  First, find the
748 	 * first bus number after the last allocated bus in the rman and
749 	 * enforce that as a minimum starting point for the range.
750 	 */
751 	if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 ||
752 	    end_free != bus->sub)
753 		start_free = bus->sub + 1;
754 	if (start_free < start)
755 		start_free = start;
756 	new_end = start_free + count - 1;
757 
758 	/*
759 	 * See if this new range would satisfy the request if it
760 	 * succeeds.
761 	 */
762 	if (new_end > end)
763 		return (NULL);
764 
765 	/* Finally, attempt to grow the existing resource. */
766 	if (bootverbose) {
767 		device_printf(bus->dev,
768 		    "attempting to grow bus range for %ju buses\n", count);
769 		printf("\tback candidate range: %ju-%ju\n", start_free,
770 		    new_end);
771 	}
772 	if (pcib_grow_subbus(bus, new_end) == 0)
773 		return (pcib_suballoc_bus(bus, child, rid, start, end, count,
774 		    flags));
775 	return (NULL);
776 }
777 #endif
778 
779 #else
780 
781 /*
782  * Is the prefetch window open (i.e., can we allocate memory in it)?
783  */
784 static int
785 pcib_is_prefetch_open(struct pcib_softc *sc)
786 {
787 	return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
788 }
789 
790 /*
791  * Is the nonprefetch window open (i.e., can we allocate memory in it)?
792  */
793 static int
794 pcib_is_nonprefetch_open(struct pcib_softc *sc)
795 {
796 	return (sc->membase > 0 && sc->membase < sc->memlimit);
797 }
798 
799 /*
800  * Is the I/O window open (i.e., can we allocate ports in it)?
801  */
802 static int
803 pcib_is_io_open(struct pcib_softc *sc)
804 {
805 	return (sc->iobase > 0 && sc->iobase < sc->iolimit);
806 }
807 
808 /*
809  * Get current I/O decode.
810  */
811 static void
812 pcib_get_io_decode(struct pcib_softc *sc)
813 {
814 	device_t	dev;
815 	uint32_t	iolow;
816 
817 	dev = sc->dev;
818 
819 	iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
820 	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
821 		sc->iobase = PCI_PPBIOBASE(
822 		    pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
823 	else
824 		sc->iobase = PCI_PPBIOBASE(0, iolow);
825 
826 	iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
827 	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
828 		sc->iolimit = PCI_PPBIOLIMIT(
829 		    pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
830 	else
831 		sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
832 }
833 
834 /*
835  * Get current memory decode.
836  */
837 static void
838 pcib_get_mem_decode(struct pcib_softc *sc)
839 {
840 	device_t	dev;
841 	pci_addr_t	pmemlow;
842 
843 	dev = sc->dev;
844 
845 	sc->membase = PCI_PPBMEMBASE(0,
846 	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
847 	sc->memlimit = PCI_PPBMEMLIMIT(0,
848 	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
849 
850 	pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2);
851 	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
852 		sc->pmembase = PCI_PPBMEMBASE(
853 		    pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow);
854 	else
855 		sc->pmembase = PCI_PPBMEMBASE(0, pmemlow);
856 
857 	pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2);
858 	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
859 		sc->pmemlimit = PCI_PPBMEMLIMIT(
860 		    pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow);
861 	else
862 		sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow);
863 }
864 
865 /*
866  * Restore previous I/O decode.
867  */
868 static void
869 pcib_set_io_decode(struct pcib_softc *sc)
870 {
871 	device_t	dev;
872 	uint32_t	iohi;
873 
874 	dev = sc->dev;
875 
876 	iohi = sc->iobase >> 16;
877 	if (iohi > 0)
878 		pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2);
879 	pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1);
880 
881 	iohi = sc->iolimit >> 16;
882 	if (iohi > 0)
883 		pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2);
884 	pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1);
885 }
886 
887 /*
888  * Restore previous memory decode.
889  */
890 static void
891 pcib_set_mem_decode(struct pcib_softc *sc)
892 {
893 	device_t	dev;
894 	pci_addr_t	pmemhi;
895 
896 	dev = sc->dev;
897 
898 	pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2);
899 	pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2);
900 
901 	pmemhi = sc->pmembase >> 32;
902 	if (pmemhi > 0)
903 		pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4);
904 	pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2);
905 
906 	pmemhi = sc->pmemlimit >> 32;
907 	if (pmemhi > 0)
908 		pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4);
909 	pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2);
910 }
911 #endif
912 
913 #ifdef PCI_HP
914 /*
915  * PCI-express HotPlug support.
916  */
917 static int pci_enable_pcie_hp = 1;
918 SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN,
919     &pci_enable_pcie_hp, 0,
920     "Enable support for native PCI-express HotPlug.");
921 
922 static void
923 pcib_probe_hotplug(struct pcib_softc *sc)
924 {
925 	device_t dev;
926 	uint32_t link_cap;
927 	uint16_t link_sta, slot_sta;
928 
929 	if (!pci_enable_pcie_hp)
930 		return;
931 
932 	dev = sc->dev;
933 	if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0)
934 		return;
935 
936 	if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT))
937 		return;
938 
939 	sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4);
940 
941 	if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0)
942 		return;
943 	link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
944 	if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0)
945 		return;
946 
947 	/*
948 	 * Some devices report that they have an MRL when they actually
949 	 * do not.  Since they always report that the MRL is open, child
950 	 * devices would be ignored.  Try to detect these devices and
951 	 * ignore their claim of HotPlug support.
952 	 *
953 	 * If there is an open MRL but the Data Link Layer is active,
954 	 * the MRL is not real.
955 	 */
956 	if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) {
957 		link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
958 		slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
959 		if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 &&
960 		    (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) {
961 			return;
962 		}
963 	}
964 
965 	/*
966 	 * Now that we're sure we want to do hot plug, ask the
967 	 * firmware, if any, if that's OK.
968 	 */
969 	if (pcib_request_feature(dev, PCI_FEATURE_HP) != 0) {
970 		if (bootverbose)
971 			device_printf(dev, "Unable to activate hot plug feature.\n");
972 		return;
973 	}
974 
975 	sc->flags |= PCIB_HOTPLUG;
976 }
977 
978 /*
979  * Send a HotPlug command to the slot control register.  If this slot
980  * uses command completion interrupts and a previous command is still
981  * in progress, then the command is dropped.  Once the previous
982  * command completes or times out, pcib_pcie_hotplug_update() will be
983  * invoked to post a new command based on the slot's state at that
984  * time.
985  */
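/*
 * Only the bits set in 'mask' are changed in the Slot Control
 * register; for example, turning the Power Indicator on would pass
 * val = PCIEM_SLOT_CTL_PI_ON with mask = PCIEM_SLOT_CTL_PIC, leaving
 * all other control bits untouched.
 */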
986 static void
987 pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask)
988 {
989 	device_t dev;
990 	uint16_t ctl, new;
991 
992 	dev = sc->dev;
993 
994 	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING)
995 		return;
996 
997 	ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
998 	new = (ctl & ~mask) | val;
999 	if (new == ctl)
1000 		return;
1001 	if (bootverbose)
1002 		device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new);
1003 	pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
1004 	if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) &&
1005 	    (ctl & new) & PCIEM_SLOT_CTL_CCIE) {
1006 		sc->flags |= PCIB_HOTPLUG_CMD_PENDING;
1007 		if (!cold)
1008 			callout_reset(&sc->pcie_cc_timer, hz,
1009 			    pcib_pcie_cc_timeout, sc);
1010 	}
1011 }
1012 
1013 static void
1014 pcib_pcie_hotplug_command_completed(struct pcib_softc *sc)
1015 {
1016 	device_t dev;
1017 
1018 	dev = sc->dev;
1019 
1020 	if (bootverbose)
1021 		device_printf(dev, "Command Completed\n");
1022 	if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING))
1023 		return;
1024 	callout_stop(&sc->pcie_cc_timer);
1025 	sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
1026 	wakeup(sc);
1027 }
1028 
1029 /*
1030  * Returns true if a card is fully inserted from the user's
1031  * perspective.  It may not yet be ready for access, but the driver
1032  * can now start enabling access if necessary.
1033  */
1034 static bool
1035 pcib_hotplug_inserted(struct pcib_softc *sc)
1036 {
1037 
1038 	/* Pretend the card isn't present if a detach is forced. */
1039 	if (sc->flags & PCIB_DETACHING)
1040 		return (false);
1041 
1042 	/* Card must be present in the slot. */
1043 	if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0)
1044 		return (false);
1045 
1046 	/* A power fault implicitly turns off power to the slot. */
1047 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
1048 		return (false);
1049 
1050 	/* If the MRL is disengaged, the slot is powered off. */
1051 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP &&
1052 	    (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0)
1053 		return (false);
1054 
1055 	return (true);
1056 }
1057 
1058 /*
1059  * Returns -1 if the card is fully inserted, powered, and ready for
1060  * access.  Otherwise, returns 0.
1061  */
1062 static int
1063 pcib_hotplug_present(struct pcib_softc *sc)
1064 {
1065 
1066 	/* Card must be inserted. */
1067 	if (!pcib_hotplug_inserted(sc))
1068 		return (0);
1069 
1070 	/*
1071 	 * Require the Electromechanical Interlock to be engaged if
1072 	 * present.
1073 	 */
1074 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP &&
1075 	    (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) == 0)
1076 		return (0);
1077 
1078 	/* Require the Data Link Layer to be active. */
1079 	if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE))
1080 		return (0);
1081 
1082 	return (-1);
1083 }
1084 
1085 static void
1086 pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
1087     bool schedule_task)
1088 {
1089 	bool card_inserted, ei_engaged;
1090 
1091 	/* Clear DETACHING if Presence Detect has cleared. */
1092 	if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) ==
1093 	    PCIEM_SLOT_STA_PDC)
1094 		sc->flags &= ~PCIB_DETACHING;
1095 
1096 	card_inserted = pcib_hotplug_inserted(sc);
1097 
1098 	/* Turn the power indicator on if a card is inserted. */
1099 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) {
1100 		mask |= PCIEM_SLOT_CTL_PIC;
1101 		if (card_inserted)
1102 			val |= PCIEM_SLOT_CTL_PI_ON;
1103 		else if (sc->flags & PCIB_DETACH_PENDING)
1104 			val |= PCIEM_SLOT_CTL_PI_BLINK;
1105 		else
1106 			val |= PCIEM_SLOT_CTL_PI_OFF;
1107 	}
1108 
1109 	/* Turn the power on via the Power Controller if a card is inserted. */
1110 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) {
1111 		mask |= PCIEM_SLOT_CTL_PCC;
1112 		if (card_inserted)
1113 			val |= PCIEM_SLOT_CTL_PC_ON;
1114 		else
1115 			val |= PCIEM_SLOT_CTL_PC_OFF;
1116 	}
1117 
1118 	/*
1119 	 * If a card is inserted, enable the Electromechanical
1120 	 * Interlock.  If a card is not inserted (or we are in the
1121 	 * process of detaching), disable the Electromechanical
1122 	 * Interlock.
1123 	 */
1124 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) {
1125 		mask |= PCIEM_SLOT_CTL_EIC;
1126 		ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0;
1127 		if (card_inserted != ei_engaged)
1128 			val |= PCIEM_SLOT_CTL_EIC;
1129 	}
1130 
1131 	/*
1132 	 * Start a timer to see if the Data Link Layer times out.
1133 	 * Note that we only start the timer if Presence Detect or MRL Sensor
1134 	 * changed on this interrupt.  Stop any scheduled timer if
1135 	 * the Data Link Layer is active.
1136 	 */
1137 	if (card_inserted &&
1138 	    !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) &&
1139 	    sc->pcie_slot_sta &
1140 	    (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) {
1141 		if (cold)
1142 			device_printf(sc->dev,
1143 			    "Data Link Layer inactive\n");
1144 		else
1145 			callout_reset(&sc->pcie_dll_timer, hz,
1146 			    pcib_pcie_dll_timeout, sc);
1147 	} else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)
1148 		callout_stop(&sc->pcie_dll_timer);
1149 
1150 	pcib_pcie_hotplug_command(sc, val, mask);
1151 
1152 	/*
1153 	 * During attach the child "pci" device is added synchronously;
1154 	 * otherwise, the task is scheduled to manage the child
1155 	 * device.
1156 	 */
1157 	if (schedule_task &&
1158 	    (pcib_hotplug_present(sc) != 0) != (sc->child != NULL))
1159 		taskqueue_enqueue(taskqueue_thread, &sc->pcie_hp_task);
1160 }
1161 
1162 static void
1163 pcib_pcie_intr_hotplug(void *arg)
1164 {
1165 	struct pcib_softc *sc;
1166 	device_t dev;
1167 
1168 	sc = arg;
1169 	dev = sc->dev;
1170 	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
1171 
1172 	/* Clear the events just reported. */
1173 	pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);
1174 
1175 	if (bootverbose)
1176 		device_printf(dev, "HotPlug interrupt: %#x\n",
1177 		    sc->pcie_slot_sta);
1178 
1179 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) {
1180 		if (sc->flags & PCIB_DETACH_PENDING) {
1181 			device_printf(dev,
1182 			    "Attention Button Pressed: Detach Cancelled\n");
1183 			sc->flags &= ~PCIB_DETACH_PENDING;
1184 			callout_stop(&sc->pcie_ab_timer);
1185 		} else {
1186 			device_printf(dev,
1187 		    "Attention Button Pressed: Detaching in 5 seconds\n");
1188 			sc->flags |= PCIB_DETACH_PENDING;
1189 			callout_reset(&sc->pcie_ab_timer, 5 * hz,
1190 			    pcib_pcie_ab_timeout, sc);
1191 		}
1192 	}
1193 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
1194 		device_printf(dev, "Power Fault Detected\n");
1195 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC)
1196 		device_printf(dev, "MRL Sensor Changed to %s\n",
1197 		    sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" :
1198 		    "closed");
1199 	if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC)
1200 		device_printf(dev, "Presence Detect Changed to %s\n",
1201 		    sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" :
1202 		    "empty");
1203 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC)
1204 		pcib_pcie_hotplug_command_completed(sc);
1205 	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) {
1206 		sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
1207 		if (bootverbose)
1208 			device_printf(dev,
1209 			    "Data Link Layer State Changed to %s\n",
1210 			    sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ?
1211 			    "active" : "inactive");
1212 	}
1213 
1214 	pcib_pcie_hotplug_update(sc, 0, 0, true);
1215 }
1216 
1217 static void
1218 pcib_pcie_hotplug_task(void *context, int pending)
1219 {
1220 	struct pcib_softc *sc;
1221 	device_t dev;
1222 
1223 	sc = context;
1224 	mtx_lock(&Giant);
1225 	dev = sc->dev;
1226 	if (pcib_hotplug_present(sc) != 0) {
1227 		if (sc->child == NULL) {
1228 			sc->child = device_add_child(dev, "pci", -1);
1229 			bus_generic_attach(dev);
1230 		}
1231 	} else {
1232 		if (sc->child != NULL) {
1233 			if (device_delete_child(dev, sc->child) == 0)
1234 				sc->child = NULL;
1235 		}
1236 	}
1237 	mtx_unlock(&Giant);
1238 }
1239 
1240 static void
1241 pcib_pcie_ab_timeout(void *arg)
1242 {
1243 	struct pcib_softc *sc;
1244 
1245 	sc = arg;
1246 	mtx_assert(&Giant, MA_OWNED);
1247 	if (sc->flags & PCIB_DETACH_PENDING) {
1248 		sc->flags |= PCIB_DETACHING;
1249 		sc->flags &= ~PCIB_DETACH_PENDING;
1250 		pcib_pcie_hotplug_update(sc, 0, 0, true);
1251 	}
1252 }
1253 
1254 static void
1255 pcib_pcie_cc_timeout(void *arg)
1256 {
1257 	struct pcib_softc *sc;
1258 	device_t dev;
1259 	uint16_t sta;
1260 
1261 	sc = arg;
1262 	dev = sc->dev;
1263 	mtx_assert(&Giant, MA_OWNED);
1264 	sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
1265 	if (!(sta & PCIEM_SLOT_STA_CC)) {
1266 		device_printf(dev,
1267 		    "HotPlug Command Timed Out - forcing detach\n");
1268 		sc->flags &= ~(PCIB_HOTPLUG_CMD_PENDING | PCIB_DETACH_PENDING);
1269 		sc->flags |= PCIB_DETACHING;
1270 		pcib_pcie_hotplug_update(sc, 0, 0, true);
1271 	} else {
1272 		device_printf(dev,
1273 	    "Missed HotPlug interrupt waiting for Command Completion\n");
1274 		pcib_pcie_intr_hotplug(sc);
1275 	}
1276 }
1277 
1278 static void
1279 pcib_pcie_dll_timeout(void *arg)
1280 {
1281 	struct pcib_softc *sc;
1282 	device_t dev;
1283 	uint16_t sta;
1284 
1285 	sc = arg;
1286 	dev = sc->dev;
1287 	mtx_assert(&Giant, MA_OWNED);
1288 	sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
1289 	if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) {
1290 		device_printf(dev,
1291 		    "Timed out waiting for Data Link Layer Active\n");
1292 		sc->flags |= PCIB_DETACHING;
1293 		pcib_pcie_hotplug_update(sc, 0, 0, true);
1294 	} else if (sta != sc->pcie_link_sta) {
1295 		device_printf(dev,
1296 		    "Missed HotPlug interrupt waiting for DLL Active\n");
1297 		pcib_pcie_intr_hotplug(sc);
1298 	}
1299 }
1300 
1301 static int
1302 pcib_alloc_pcie_irq(struct pcib_softc *sc)
1303 {
1304 	device_t dev;
1305 	int count, error, rid;
1306 
1307 	rid = -1;
1308 	dev = sc->dev;
1309 
1310 	/*
1311 	 * For simplicity, only use MSI-X if there is a single message.
1312 	 * To support a device with multiple messages we would have to
1313 	 * use remap intr if the MSI number is not 0.
1314 	 */
1315 	count = pci_msix_count(dev);
1316 	if (count == 1) {
1317 		error = pci_alloc_msix(dev, &count);
1318 		if (error == 0)
1319 			rid = 1;
1320 	}
1321 
1322 	if (rid < 0 && pci_msi_count(dev) > 0) {
1323 		count = 1;
1324 		error = pci_alloc_msi(dev, &count);
1325 		if (error == 0)
1326 			rid = 1;
1327 	}
1328 
1329 	if (rid < 0)
1330 		rid = 0;
1331 
1332 	sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1333 	    RF_ACTIVE);
1334 	if (sc->pcie_irq == NULL) {
1335 		device_printf(dev,
1336 		    "Failed to allocate interrupt for PCI-e events\n");
1337 		if (rid > 0)
1338 			pci_release_msi(dev);
1339 		return (ENXIO);
1340 	}
1341 
1342 	error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC,
1343 	    NULL, pcib_pcie_intr_hotplug, sc, &sc->pcie_ihand);
1344 	if (error) {
1345 		device_printf(dev, "Failed to setup PCI-e interrupt handler\n");
1346 		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq);
1347 		if (rid > 0)
1348 			pci_release_msi(dev);
1349 		return (error);
1350 	}
1351 	return (0);
1352 }
1353 
1354 static int
1355 pcib_release_pcie_irq(struct pcib_softc *sc)
1356 {
1357 	device_t dev;
1358 	int error;
1359 
1360 	dev = sc->dev;
1361 	error = bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand);
1362 	if (error)
1363 		return (error);
1364 	error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq);
1365 	if (error)
1366 		return (error);
1367 	return (pci_release_msi(dev));
1368 }
1369 
1370 static void
1371 pcib_setup_hotplug(struct pcib_softc *sc)
1372 {
1373 	device_t dev;
1374 	uint16_t mask, val;
1375 
1376 	dev = sc->dev;
1377 	callout_init(&sc->pcie_ab_timer, 0);
1378 	callout_init(&sc->pcie_cc_timer, 0);
1379 	callout_init(&sc->pcie_dll_timer, 0);
1380 	TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc);
1381 
1382 	/* Allocate IRQ. */
1383 	if (pcib_alloc_pcie_irq(sc) != 0)
1384 		return;
1385 
1386 	sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
1387 	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
1388 
1389 	/* Clear any events previously pending. */
1390 	pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);
1391 
1392 	/* Enable HotPlug events. */
1393 	mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
1394 	    PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
1395 	    PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
1396 	val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE;
1397 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB)
1398 		val |= PCIEM_SLOT_CTL_ABPE;
1399 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP)
1400 		val |= PCIEM_SLOT_CTL_PFDE;
1401 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP)
1402 		val |= PCIEM_SLOT_CTL_MRLSCE;
1403 	if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS))
1404 		val |= PCIEM_SLOT_CTL_CCIE;
1405 
1406 	/* Turn the attention indicator off. */
1407 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
1408 		mask |= PCIEM_SLOT_CTL_AIC;
1409 		val |= PCIEM_SLOT_CTL_AI_OFF;
1410 	}
1411 
1412 	pcib_pcie_hotplug_update(sc, val, mask, false);
1413 }
1414 
1415 static int
1416 pcib_detach_hotplug(struct pcib_softc *sc)
1417 {
1418 	uint16_t mask, val;
1419 	int error;
1420 
1421 	/* Disable the card in the slot and force it to detach. */
1422 	if (sc->flags & PCIB_DETACH_PENDING) {
1423 		sc->flags &= ~PCIB_DETACH_PENDING;
1424 		callout_stop(&sc->pcie_ab_timer);
1425 	}
1426 	sc->flags |= PCIB_DETACHING;
1427 
1428 	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
1429 		callout_stop(&sc->pcie_cc_timer);
1430 		tsleep(sc, 0, "hpcmd", hz);
1431 		sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
1432 	}
1433 
1434 	/* Disable HotPlug events. */
1435 	mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
1436 	    PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
1437 	    PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
1438 	val = 0;
1439 
1440 	/* Turn the attention indicator off. */
1441 	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
1442 		mask |= PCIEM_SLOT_CTL_AIC;
1443 		val |= PCIEM_SLOT_CTL_AI_OFF;
1444 	}
1445 
1446 	pcib_pcie_hotplug_update(sc, val, mask, false);
1447 
1448 	error = pcib_release_pcie_irq(sc);
1449 	if (error)
1450 		return (error);
1451 	taskqueue_drain(taskqueue_thread, &sc->pcie_hp_task);
1452 	callout_drain(&sc->pcie_ab_timer);
1453 	callout_drain(&sc->pcie_cc_timer);
1454 	callout_drain(&sc->pcie_dll_timer);
1455 	return (0);
1456 }
1457 #endif
1458 
1459 /*
1460  * Get current bridge configuration.
1461  */
1462 static void
1463 pcib_cfg_save(struct pcib_softc *sc)
1464 {
1465 #ifndef NEW_PCIB
1466 	device_t	dev;
1467 	uint16_t command;
1468 
1469 	dev = sc->dev;
1470 
1471 	command = pci_read_config(dev, PCIR_COMMAND, 2);
1472 	if (command & PCIM_CMD_PORTEN)
1473 		pcib_get_io_decode(sc);
1474 	if (command & PCIM_CMD_MEMEN)
1475 		pcib_get_mem_decode(sc);
1476 #endif
1477 }
1478 
1479 /*
1480  * Restore previous bridge configuration.
1481  */
1482 static void
1483 pcib_cfg_restore(struct pcib_softc *sc)
1484 {
1485 #ifndef NEW_PCIB
1486 	uint16_t command;
1487 #endif
1488 
1489 #ifdef NEW_PCIB
1490 	pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM);
1491 #else
1492 	command = pci_read_config(sc->dev, PCIR_COMMAND, 2);
1493 	if (command & PCIM_CMD_PORTEN)
1494 		pcib_set_io_decode(sc);
1495 	if (command & PCIM_CMD_MEMEN)
1496 		pcib_set_mem_decode(sc);
1497 #endif
1498 }
1499 
1500 /*
1501  * Generic device interface
1502  */
1503 static int
1504 pcib_probe(device_t dev)
1505 {
1506     if ((pci_get_class(dev) == PCIC_BRIDGE) &&
1507 	(pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) {
1508 	device_set_desc(dev, "PCI-PCI bridge");
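	/*
	 * A deliberately low (very negative) probe priority so that
	 * more specific bridge drivers can claim the device instead.
	 */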
1509 	return(-10000);
1510     }
1511     return(ENXIO);
1512 }
1513 
1514 void
1515 pcib_attach_common(device_t dev)
1516 {
1517     struct pcib_softc	*sc;
1518     struct sysctl_ctx_list *sctx;
1519     struct sysctl_oid	*soid;
1520     int comma;
1521 
1522     sc = device_get_softc(dev);
1523     sc->dev = dev;
1524 
1525     /*
1526      * Get current bridge configuration.
1527      */
1528     sc->domain = pci_get_domain(dev);
1529 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
1530     sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1);
1531     sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
1532 #endif
1533     sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2);
1534     pcib_cfg_save(sc);
1535 
1536     /*
1537      * The primary bus register should always be the bus of the
1538      * parent.
1539      */
1540     sc->pribus = pci_get_bus(dev);
1541     pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1);
1542 
1543     /*
1544      * Setup sysctl reporting nodes
1545      */
1546     sctx = device_get_sysctl_ctx(dev);
1547     soid = device_get_sysctl_tree(dev);
1548     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain",
1549       CTLFLAG_RD, &sc->domain, 0, "Domain number");
1550     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus",
1551       CTLFLAG_RD, &sc->pribus, 0, "Primary bus number");
1552     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus",
1553       CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number");
1554     SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus",
1555       CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number");
1556 
1557     /*
1558      * Quirk handling.
1559      */
1560     switch (pci_get_devid(dev)) {
1561 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
1562     case 0x12258086:		/* Intel 82454KX/GX (Orion) */
1563 	{
1564 	    uint8_t	supbus;
1565 
1566 	    supbus = pci_read_config(dev, 0x41, 1);
1567 	    if (supbus != 0xff) {
1568 		sc->bus.sec = supbus + 1;
1569 		sc->bus.sub = supbus + 1;
1570 	    }
1571 	    break;
1572 	}
1573 #endif
1574 
1575     /*
1576      * The i82380FB mobile docking controller is a PCI-PCI bridge,
1577      * and it is a subtractive bridge.  However, the ProgIf is wrong,
1578      * so the normal setting of the PCIB_SUBTRACTIVE bit doesn't
1579      * happen.  There are also Toshiba and Cavium ThunderX bridges
1580      * that behave this way.
1581      */
1582     case 0xa002177d:		/* Cavium ThunderX */
1583     case 0x124b8086:		/* Intel 82380FB Mobile */
1584     case 0x060513d7:		/* Toshiba ???? */
1585 	sc->flags |= PCIB_SUBTRACTIVE;
1586 	break;
1587 
1588 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
1589     /* Compaq R3000 BIOS sets wrong subordinate bus number. */
1590     case 0x00dd10de:
1591 	{
1592 	    char *cp;
1593 
1594 	    if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
1595 		break;
1596 	    if (strncmp(cp, "Compal", 6) != 0) {
1597 		freeenv(cp);
1598 		break;
1599 	    }
1600 	    freeenv(cp);
1601 	    if ((cp = kern_getenv("smbios.planar.product")) == NULL)
1602 		break;
1603 	    if (strncmp(cp, "08A0", 4) != 0) {
1604 		freeenv(cp);
1605 		break;
1606 	    }
1607 	    freeenv(cp);
1608 	    if (sc->bus.sub < 0xa) {
1609 		pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1);
1610 		sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
1611 	    }
1612 	    break;
1613 	}
1614 #endif
1615     }
1616 
1617     if (pci_msi_device_blacklisted(dev))
1618 	sc->flags |= PCIB_DISABLE_MSI;
1619 
1620     if (pci_msix_device_blacklisted(dev))
1621 	sc->flags |= PCIB_DISABLE_MSIX;
1622 
1623     /*
1624      * Intel 815, 845 and other chipsets say they are PCI-PCI bridges,
1625      * but have a ProgIF of 0x80.  The 82801 family (AA, AB, BAM/CAM,
1626      * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese.
1627      * This means they act as if they were subtractively decoding
1628      * bridges and pass all transactions.  Mark them and real ProgIf 1
1629      * parts as subtractive.
1630      */
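    /*
     * The mask below ignores the low byte of the device ID, so the
     * comparison matches Intel (vendor 0x8086) device IDs
     * 0x2400-0x24ff, i.e. the 82801-family hub bridges described
     * above.
     */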
1631     if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 ||
1632       pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE)
1633 	sc->flags |= PCIB_SUBTRACTIVE;
1634 
1635 #ifdef PCI_HP
1636     pcib_probe_hotplug(sc);
1637 #endif
1638 #ifdef NEW_PCIB
1639 #ifdef PCI_RES_BUS
1640     pcib_setup_secbus(dev, &sc->bus, 1);
1641 #endif
1642     pcib_probe_windows(sc);
1643 #endif
1644 #ifdef PCI_HP
1645     if (sc->flags & PCIB_HOTPLUG)
1646 	    pcib_setup_hotplug(sc);
1647 #endif
1648     if (bootverbose) {
1649 	device_printf(dev, "  domain            %d\n", sc->domain);
1650 	device_printf(dev, "  secondary bus     %d\n", sc->bus.sec);
1651 	device_printf(dev, "  subordinate bus   %d\n", sc->bus.sub);
1652 #ifdef NEW_PCIB
1653 	if (pcib_is_window_open(&sc->io))
1654 	    device_printf(dev, "  I/O decode        0x%jx-0x%jx\n",
1655 	      (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit);
1656 	if (pcib_is_window_open(&sc->mem))
1657 	    device_printf(dev, "  memory decode     0x%jx-0x%jx\n",
1658 	      (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit);
1659 	if (pcib_is_window_open(&sc->pmem))
1660 	    device_printf(dev, "  prefetched decode 0x%jx-0x%jx\n",
1661 	      (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit);
1662 #else
1663 	if (pcib_is_io_open(sc))
1664 	    device_printf(dev, "  I/O decode        0x%x-0x%x\n",
1665 	      sc->iobase, sc->iolimit);
1666 	if (pcib_is_nonprefetch_open(sc))
1667 	    device_printf(dev, "  memory decode     0x%jx-0x%jx\n",
1668 	      (uintmax_t)sc->membase, (uintmax_t)sc->memlimit);
1669 	if (pcib_is_prefetch_open(sc))
1670 	    device_printf(dev, "  prefetched decode 0x%jx-0x%jx\n",
1671 	      (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
1672 #endif
1673 	if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) ||
1674 	    sc->flags & PCIB_SUBTRACTIVE) {
1675 		device_printf(dev, "  special decode    ");
1676 		comma = 0;
1677 		if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) {
1678 			printf("ISA");
1679 			comma = 1;
1680 		}
1681 		if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) {
1682 			printf("%sVGA", comma ? ", " : "");
1683 			comma = 1;
1684 		}
1685 		if (sc->flags & PCIB_SUBTRACTIVE)
1686 			printf("%ssubtractive", comma ? ", " : "");
1687 		printf("\n");
1688 	}
1689     }
1690 
1691     /*
1692      * Always enable busmastering on bridges so that transactions
1693      * initiated on the secondary bus are passed through to the
1694      * primary bus.
1695      */
1696     pci_enable_busmaster(dev);
1697 }
1698 
1699 #ifdef PCI_HP
1700 static int
1701 pcib_present(struct pcib_softc *sc)
1702 {
1703 
1704 	if (sc->flags & PCIB_HOTPLUG)
1705 		return (pcib_hotplug_present(sc) != 0);
1706 	return (1);
1707 }
1708 #endif
1709 
1710 int
1711 pcib_attach_child(device_t dev)
1712 {
1713 	struct pcib_softc *sc;
1714 
1715 	sc = device_get_softc(dev);
1716 	if (sc->bus.sec == 0) {
1717 		/* no secondary bus; we should have fixed this */
1718 		return(0);
1719 	}
1720 
1721 #ifdef PCI_HP
1722 	if (!pcib_present(sc)) {
1723 		/* An empty HotPlug slot, so don't add a PCI bus yet. */
1724 		return (0);
1725 	}
1726 #endif
1727 
1728 	sc->child = device_add_child(dev, "pci", -1);
1729 	return (bus_generic_attach(dev));
1730 }
1731 
1732 int
1733 pcib_attach(device_t dev)
1734 {
1735 
1736     pcib_attach_common(dev);
1737     return (pcib_attach_child(dev));
1738 }
1739 
1740 int
1741 pcib_detach(device_t dev)
1742 {
1743 #if defined(PCI_HP) || defined(NEW_PCIB)
1744 	struct pcib_softc *sc;
1745 #endif
1746 	int error;
1747 
1748 #if defined(PCI_HP) || defined(NEW_PCIB)
1749 	sc = device_get_softc(dev);
1750 #endif
1751 	error = bus_generic_detach(dev);
1752 	if (error)
1753 		return (error);
1754 #ifdef PCI_HP
1755 	if (sc->flags & PCIB_HOTPLUG) {
1756 		error = pcib_detach_hotplug(sc);
1757 		if (error)
1758 			return (error);
1759 	}
1760 #endif
1761 	error = device_delete_children(dev);
1762 	if (error)
1763 		return (error);
1764 #ifdef NEW_PCIB
1765 	pcib_free_windows(sc);
1766 #ifdef PCI_RES_BUS
1767 	pcib_free_secbus(dev, &sc->bus);
1768 #endif
1769 #endif
1770 	return (0);
1771 }
1772 
1773 int
1774 pcib_suspend(device_t dev)
1775 {
1776 
1777 	pcib_cfg_save(device_get_softc(dev));
1778 	return (bus_generic_suspend(dev));
1779 }
1780 
1781 int
1782 pcib_resume(device_t dev)
1783 {
1784 
1785 	pcib_cfg_restore(device_get_softc(dev));
1786 	return (bus_generic_resume(dev));
1787 }
1788 
1789 void
1790 pcib_bridge_init(device_t dev)
1791 {
1792 	pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
1793 	pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2);
1794 	pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1);
1795 	pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2);
1796 	pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2);
1797 	pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2);
1798 	pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
1799 	pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4);
1800 	pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2);
1801 	pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4);
1802 }
1803 
1804 int
1805 pcib_child_present(device_t dev, device_t child)
1806 {
1807 #ifdef PCI_HP
1808 	struct pcib_softc *sc = device_get_softc(dev);
1809 	int retval;
1810 
1811 	retval = bus_child_present(dev);
1812 	if (retval != 0 && sc->flags & PCIB_HOTPLUG)
1813 		retval = pcib_hotplug_present(sc);
1814 	return (retval);
1815 #else
1816 	return (bus_child_present(dev));
1817 #endif
1818 }
1819 
1820 int
1821 pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1822 {
1823     struct pcib_softc	*sc = device_get_softc(dev);
1824 
1825     switch (which) {
1826     case PCIB_IVAR_DOMAIN:
1827 	*result = sc->domain;
1828 	return(0);
1829     case PCIB_IVAR_BUS:
1830 	*result = sc->bus.sec;
1831 	return(0);
1832     }
1833     return(ENOENT);
1834 }
1835 
1836 int
1837 pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
1838 {
1839 
1840     switch (which) {
1841     case PCIB_IVAR_DOMAIN:
1842 	return(EINVAL);
1843     case PCIB_IVAR_BUS:
1844 	return(EINVAL);
1845     }
1846     return(ENOENT);
1847 }
1848 
1849 #ifdef NEW_PCIB
1850 /*
1851  * Attempt to allocate a resource from the existing resources assigned
1852  * to a window.
1853  */
1854 static struct resource *
1855 pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w,
1856     device_t child, int type, int *rid, rman_res_t start, rman_res_t end,
1857     rman_res_t count, u_int flags)
1858 {
1859 	struct resource *res;
1860 
1861 	if (!pcib_is_window_open(w))
1862 		return (NULL);
1863 
1864 	res = rman_reserve_resource(&w->rman, start, end, count,
1865 	    flags & ~RF_ACTIVE, child);
1866 	if (res == NULL)
1867 		return (NULL);
1868 
1869 	if (bootverbose)
1870 		device_printf(sc->dev,
1871 		    "allocated %s range (%#jx-%#jx) for rid %x of %s\n",
1872 		    w->name, rman_get_start(res), rman_get_end(res), *rid,
1873 		    pcib_child_name(child));
1874 	rman_set_rid(res, *rid);
1875 
1876 	/*
1877 	 * If the resource should be active, pass that request up the
1878 	 * tree.  This assumes the parent drivers can handle
1879 	 * activating sub-allocated resources.
1880 	 */
1881 	if (flags & RF_ACTIVE) {
1882 		if (bus_activate_resource(child, type, *rid, res) != 0) {
1883 			rman_release_resource(res);
1884 			return (NULL);
1885 		}
1886 	}
1887 
1888 	return (res);
1889 }
1890 
1891 /* Allocate a fresh resource range for an unconfigured window. */
1892 static int
1893 pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type,
1894     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1895 {
1896 	struct resource *res;
1897 	rman_res_t base, limit, wmask;
1898 	int rid;
1899 
1900 	/*
1901 	 * If this is an I/O window on a bridge with ISA enable set
1902 	 * and the start address is below 64k, then try to allocate an
1903 	 * initial window of 0x1000 bytes long starting at address
1904 	 * initial window 0x1000 bytes long starting at address
1905 	 * was larger than the non-aliased range size of 0x100 our
1906 	 * caller would have raised the start address up to 64k
1907 	 * already.
1908 	 */
1909 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
1910 	    start < 65536) {
1911 		for (base = 0xf000; (long)base >= 0; base -= 0x1000) {
1912 			limit = base + 0xfff;
1913 
1914 			/*
1915 			 * Skip ranges that wouldn't work for the
1916 			 * original request.  Note that the actual
1917 			 * original request.  Note that the parts that
1918 			 * actually overlap are the non-alias
1919 			 * quite a simple comparison.
1920 			 */
1921 			if (start + count > limit - 0x400)
1922 				continue;
1923 			if (base == 0) {
1924 				/*
1925 				 * The first open region for the window at
1926 				 * 0 is 0x400-0x4ff.
1927 				 */
1928 				if (end - count + 1 < 0x400)
1929 					continue;
1930 			} else {
1931 				if (end - count + 1 < base)
1932 					continue;
1933 			}
1934 
1935 			if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) {
1936 				w->base = base;
1937 				w->limit = limit;
1938 				return (0);
1939 			}
1940 		}
1941 		return (ENOSPC);
1942 	}
1943 
1944 	wmask = ((rman_res_t)1 << w->step) - 1;
1945 	if (RF_ALIGNMENT(flags) < w->step) {
1946 		flags &= ~RF_ALIGNMENT_MASK;
1947 		flags |= RF_ALIGNMENT_LOG2(w->step);
1948 	}
1949 	start &= ~wmask;
1950 	end |= wmask;
1951 	count = roundup2(count, (rman_res_t)1 << w->step);
1952 	rid = w->reg;
1953 	res = bus_alloc_resource(sc->dev, type, &rid, start, end, count,
1954 	    flags & ~RF_ACTIVE);
1955 	if (res == NULL)
1956 		return (ENOSPC);
1957 	pcib_add_window_resources(w, &res, 1);
1958 	pcib_activate_window(sc, type);
1959 	w->base = rman_get_start(res);
1960 	w->limit = rman_get_end(res);
1961 	return (0);
1962 }
1963 
1964 /* Try to expand an existing window to the requested base and limit. */
1965 static int
1966 pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type,
1967     rman_res_t base, rman_res_t limit)
1968 {
1969 	struct resource *res;
1970 	int error, i, force_64k_base;
1971 
1972 	KASSERT(base <= w->base && limit >= w->limit,
1973 	    ("attempting to shrink window"));
1974 
1975 	/*
1976 	 * XXX: pcib_grow_window() doesn't try to do this anyway and
1977 	 * the error handling for all the edge cases would be tedious.
1978 	 */
1979 	KASSERT(limit == w->limit || base == w->base,
1980 	    ("attempting to grow both ends of a window"));
1981 
1982 	/*
1983 	 * Yet more special handling for requests to expand an I/O
1984 	 * window behind an ISA-enabled bridge.  Since I/O windows
1985 	 * have to grow in 0x1000 increments and the end of the 0xffff
1986 	 * range is an alias, growing a window below 64k will always
1987 	 * result in allocating new resources and never adjusting an
1988 	 * existing resource.
1989 	 */
1990 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
1991 	    (limit <= 65535 || (base <= 65535 && base != w->base))) {
1992 		KASSERT(limit == w->limit || limit <= 65535,
1993 		    ("attempting to grow both ends across 64k ISA alias"));
1994 
1995 		if (base != w->base)
1996 			error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1);
1997 		else
1998 			error = pcib_alloc_nonisa_ranges(sc, w->limit + 1,
1999 			    limit);
2000 		if (error == 0) {
2001 			w->base = base;
2002 			w->limit = limit;
2003 		}
2004 		return (error);
2005 	}
2006 
2007 	/*
2008 	 * Find the existing resource to adjust.  Usually there is only one,
2009 	 * but for an ISA-enabled bridge we might be growing the I/O window
2010 	 * above 64k and need to find the existing resource that maps all
2011 	 * of the area above 64k.
2012 	 */
2013 	for (i = 0; i < w->count; i++) {
2014 		if (rman_get_end(w->res[i]) == w->limit)
2015 			break;
2016 	}
2017 	KASSERT(i != w->count, ("did not find existing resource"));
2018 	res = w->res[i];
2019 
2020 	/*
2021 	 * Usually the resource we found should match the window's
2022 	 * existing range.  The one exception is the ISA-enabled case
2023 	 * mentioned above, in which case the resource should start at
2024 	 * 64k.
2025 	 */
2026 	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
2027 	    w->base <= 65535) {
2028 		KASSERT(rman_get_start(res) == 65536,
2029 		    ("existing resource mismatch"));
2030 		force_64k_base = 1;
2031 	} else {
2032 		KASSERT(w->base == rman_get_start(res),
2033 		    ("existing resource mismatch"));
2034 		force_64k_base = 0;
2035 	}
2036 
2037 	error = bus_adjust_resource(sc->dev, type, res, force_64k_base ?
2038 	    rman_get_start(res) : base, limit);
2039 	if (error)
2040 		return (error);
2041 
2042 	/* Add the newly allocated region to the resource manager. */
2043 	if (w->base != base) {
2044 		error = rman_manage_region(&w->rman, base, w->base - 1);
2045 		w->base = base;
2046 	} else {
2047 		error = rman_manage_region(&w->rman, w->limit + 1, limit);
2048 		w->limit = limit;
2049 	}
2050 	if (error) {
2051 		if (bootverbose)
2052 			device_printf(sc->dev,
2053 			    "failed to expand %s resource manager\n", w->name);
2054 		(void)bus_adjust_resource(sc->dev, type, res, force_64k_base ?
2055 		    rman_get_start(res) : w->base, w->limit);
2056 	}
2057 	return (error);
2058 }
2059 
2060 /*
2061  * Attempt to grow a window to make room for a given resource request.
2062  */
2063 static int
2064 pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
2065     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2066 {
2067 	rman_res_t align, start_free, end_free, front, back, wmask;
2068 	int error;
2069 
2070 	/*
2071 	 * Clamp the desired resource range to the maximum address
2072 	 * this window supports.  Reject impossible requests.
2073 	 *
2074 	 * For I/O port requests behind a bridge with the ISA enable
2075 	 * bit set, force large allocations to start above 64k.
2076 	 */
2077 	if (!w->valid)
2078 		return (EINVAL);
2079 	if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 &&
2080 	    start < 65536)
2081 		start = 65536;
2082 	if (end > w->rman.rm_end)
2083 		end = w->rman.rm_end;
2084 	if (start + count - 1 > end || start + count < start)
2085 		return (EINVAL);
2086 	wmask = ((rman_res_t)1 << w->step) - 1;
2087 
2088 	/*
2089 	 * If there is no resource at all, just try to allocate enough
2090 	 * aligned space for this resource.
2091 	 */
2092 	if (w->res == NULL) {
2093 		error = pcib_alloc_new_window(sc, w, type, start, end, count,
2094 		    flags);
2095 		if (error) {
2096 			if (bootverbose)
2097 				device_printf(sc->dev,
2098 		    "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n",
2099 				    w->name, start, end, count);
2100 			return (error);
2101 		}
2102 		if (bootverbose)
2103 			device_printf(sc->dev,
2104 			    "allocated initial %s window of %#jx-%#jx\n",
2105 			    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
2106 		goto updatewin;
2107 	}
2108 
2109 	/*
2110 	 * See if growing the window would help.  Compute the minimum
2111 	 * amount of address space needed on both the front and back
2112 	 * ends of the existing window to satisfy the allocation.
2113 	 *
2114 	 * For each end, build a candidate region adjusting for the
2115 	 * required alignment, etc.  If there is a free region at the
2116 	 * edge of the window, grow from the inner edge of the free
2117 	 * region.  Otherwise grow from the window boundary.
2118 	 *
2119 	 * Growing an I/O window below 64k for a bridge with the ISA
2120 	 * enable bit doesn't require any special magic as the step
2121 	 * size of an I/O window (1k) always includes multiple
2122 	 * non-alias ranges when it is grown in either direction.
2123 	 *
2124 	 * XXX: Special case: if w->res is completely empty and the
2125 	 * request size is larger than w->res, we should find the
2126 	 * optimal aligned buffer containing w->res and allocate that.
2127 	 */
2128 	if (bootverbose)
2129 		device_printf(sc->dev,
2130 		    "attempting to grow %s window for (%#jx-%#jx,%#jx)\n",
2131 		    w->name, start, end, count);
2132 	align = (rman_res_t)1 << RF_ALIGNMENT(flags);
2133 	if (start < w->base) {
2134 		if (rman_first_free_region(&w->rman, &start_free, &end_free) !=
2135 		    0 || start_free != w->base)
2136 			end_free = w->base;
2137 		if (end_free > end)
2138 			end_free = end + 1;
2139 
2140 		/* Move end_free down until it is properly aligned. */
2141 		end_free &= ~(align - 1);
2142 		end_free--;
2143 		front = end_free - (count - 1);
2144 
2145 		/*
2146 		 * The resource would now be allocated at (front,
2147 		 * end_free).  Ensure that fits in the (start, end)
2148 		 * bounds.  end_free is checked above.  If 'front' is
2149 		 * ok, ensure it is properly aligned for this window.
2150 		 * Also check for underflow.
2151 		 */
2152 		if (front >= start && front <= end_free) {
2153 			if (bootverbose)
2154 				printf("\tfront candidate range: %#jx-%#jx\n",
2155 				    front, end_free);
2156 			front &= ~wmask;
2157 			front = w->base - front;
2158 		} else
2159 			front = 0;
2160 	} else
2161 		front = 0;
2162 	if (end > w->limit) {
2163 		if (rman_last_free_region(&w->rman, &start_free, &end_free) !=
2164 		    0 || end_free != w->limit)
2165 			start_free = w->limit + 1;
2166 		if (start_free < start)
2167 			start_free = start;
2168 
2169 		/* Move start_free up until it is properly aligned. */
2170 		start_free = roundup2(start_free, align);
2171 		back = start_free + count - 1;
2172 
2173 		/*
2174 		 * The resource would now be allocated at (start_free,
2175 		 * back).  Ensure that fits in the (start, end)
2176 		 * bounds.  start_free is checked above.  If 'back' is
2177 		 * ok, ensure it is properly aligned for this window.
2178 		 * Also check for overflow.
2179 		 */
2180 		if (back <= end && start_free <= back) {
2181 			if (bootverbose)
2182 				printf("\tback candidate range: %#jx-%#jx\n",
2183 				    start_free, back);
2184 			back |= wmask;
2185 			back -= w->limit;
2186 		} else
2187 			back = 0;
2188 	} else
2189 		back = 0;
2190 
2191 	/*
2192 	 * Try to allocate the smallest needed region first.
2193 	 * If that fails, fall back to the other region.
2194 	 */
2195 	error = ENOSPC;
2196 	while (front != 0 || back != 0) {
2197 		if (front != 0 && (front <= back || back == 0)) {
2198 			error = pcib_expand_window(sc, w, type, w->base - front,
2199 			    w->limit);
2200 			if (error == 0)
2201 				break;
2202 			front = 0;
2203 		} else {
2204 			error = pcib_expand_window(sc, w, type, w->base,
2205 			    w->limit + back);
2206 			if (error == 0)
2207 				break;
2208 			back = 0;
2209 		}
2210 	}
2211 
2212 	if (error)
2213 		return (error);
2214 	if (bootverbose)
2215 		device_printf(sc->dev, "grew %s window to %#jx-%#jx\n",
2216 		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
2217 
2218 updatewin:
2219 	/* Write the new window. */
2220 	KASSERT((w->base & wmask) == 0, ("start address is not aligned"));
2221 	KASSERT((w->limit & wmask) == wmask, ("end address is not aligned"));
2222 	pcib_write_windows(sc, w->mask);
2223 	return (0);
2224 }
2225 
2226 /*
2227  * We have to trap resource allocation requests and ensure that the bridge
2228  * is set up to, or capable of, handling them.
2229  */
2230 struct resource *
2231 pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
2232     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2233 {
2234 	struct pcib_softc *sc;
2235 	struct resource *r;
2236 
2237 	sc = device_get_softc(dev);
2238 
2239 	/*
2240 	 * VGA resources are decoded iff the VGA enable bit is set in
2241 	 * the bridge control register.  VGA resources do not fall into
2242 	 * the resource windows and are passed up to the parent.
2243 	 */
2244 	if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) ||
2245 	    (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) {
2246 		if (sc->bridgectl & PCIB_BCR_VGA_ENABLE)
2247 			return (bus_generic_alloc_resource(dev, child, type,
2248 			    rid, start, end, count, flags));
2249 		else
2250 			return (NULL);
2251 	}
2252 
2253 	switch (type) {
2254 #ifdef PCI_RES_BUS
2255 	case PCI_RES_BUS:
2256 		return (pcib_alloc_subbus(&sc->bus, child, rid, start, end,
2257 		    count, flags));
2258 #endif
2259 	case SYS_RES_IOPORT:
2260 		if (pcib_is_isa_range(sc, start, end, count))
2261 			return (NULL);
2262 		r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start,
2263 		    end, count, flags);
2264 		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
2265 			break;
2266 		if (pcib_grow_window(sc, &sc->io, type, start, end, count,
2267 		    flags) == 0)
2268 			r = pcib_suballoc_resource(sc, &sc->io, child, type,
2269 			    rid, start, end, count, flags);
2270 		break;
2271 	case SYS_RES_MEMORY:
2272 		/*
2273 		 * For prefetchable resources, prefer the prefetchable
2274 		 * memory window, but fall back to the regular memory
2275 		 * window if that fails.  Try both windows before
2276 		 * attempting to grow a window in case the firmware
2277 		 * has used a range in the regular memory window to
2278 		 * map a prefetchable BAR.
2279 		 */
2280 		if (flags & RF_PREFETCHABLE) {
2281 			r = pcib_suballoc_resource(sc, &sc->pmem, child, type,
2282 			    rid, start, end, count, flags);
2283 			if (r != NULL)
2284 				break;
2285 		}
2286 		r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid,
2287 		    start, end, count, flags);
2288 		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
2289 			break;
2290 		if (flags & RF_PREFETCHABLE) {
2291 			if (pcib_grow_window(sc, &sc->pmem, type, start, end,
2292 			    count, flags) == 0) {
2293 				r = pcib_suballoc_resource(sc, &sc->pmem, child,
2294 				    type, rid, start, end, count, flags);
2295 				if (r != NULL)
2296 					break;
2297 			}
2298 		}
2299 		if (pcib_grow_window(sc, &sc->mem, type, start, end, count,
2300 		    flags & ~RF_PREFETCHABLE) == 0)
2301 			r = pcib_suballoc_resource(sc, &sc->mem, child, type,
2302 			    rid, start, end, count, flags);
2303 		break;
2304 	default:
2305 		return (bus_generic_alloc_resource(dev, child, type, rid,
2306 		    start, end, count, flags));
2307 	}
2308 
2309 	/*
2310 	 * If attempts to suballocate from the window fail but this is a
2311 	 * subtractive bridge, pass the request up the tree.
2312 	 */
2313 	if (sc->flags & PCIB_SUBTRACTIVE && r == NULL)
2314 		return (bus_generic_alloc_resource(dev, child, type, rid,
2315 		    start, end, count, flags));
2316 	return (r);
2317 }
2318 
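/*
 * If the resource was sub-allocated from one of the bridge's windows,
 * adjust it within that window's resource manager; otherwise pass the
 * request up to the parent bus.
 */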
2319 int
2320 pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
2321     rman_res_t start, rman_res_t end)
2322 {
2323 	struct pcib_softc *sc;
2324 
2325 	sc = device_get_softc(bus);
2326 	if (pcib_is_resource_managed(sc, type, r))
2327 		return (rman_adjust_resource(r, start, end));
2328 	return (bus_generic_adjust_resource(bus, child, type, r, start, end));
2329 }
2330 
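/*
 * Release a resource sub-allocated from one of the bridge's windows
 * back to the window's resource manager, deactivating it first if
 * necessary; other resources are released by the parent bus.
 */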
2331 int
2332 pcib_release_resource(device_t dev, device_t child, int type, int rid,
2333     struct resource *r)
2334 {
2335 	struct pcib_softc *sc;
2336 	int error;
2337 
2338 	sc = device_get_softc(dev);
2339 	if (pcib_is_resource_managed(sc, type, r)) {
2340 		if (rman_get_flags(r) & RF_ACTIVE) {
2341 			error = bus_deactivate_resource(child, type, rid, r);
2342 			if (error)
2343 				return (error);
2344 		}
2345 		return (rman_release_resource(r));
2346 	}
2347 	return (bus_generic_release_resource(dev, child, type, rid, r));
2348 }
2349 #else
2350 /*
2351  * We have to trap resource allocation requests and ensure that the bridge
2352  * is set up to, or capable of, handling them.
2353  */
2354 struct resource *
2355 pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
2356     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
2357 {
2358 	struct pcib_softc	*sc = device_get_softc(dev);
2359 	const char *name, *suffix;
2360 	int ok;
2361 
2362 	/*
2363 	 * Fail the allocation for this range if it's not supported.
2364 	 */
2365 	name = device_get_nameunit(child);
2366 	if (name == NULL) {
2367 		name = "";
2368 		suffix = "";
2369 	} else
2370 		suffix = " ";
2371 	switch (type) {
2372 	case SYS_RES_IOPORT:
2373 		ok = 0;
2374 		if (!pcib_is_io_open(sc))
2375 			break;
2376 		ok = (start >= sc->iobase && end <= sc->iolimit);
2377 
2378 		/*
2379 		 * Make sure we allow access to VGA I/O addresses when the
2380 		 * bridge has the "VGA Enable" bit set.
2381 		 */
2382 		if (!ok && pci_is_vga_ioport_range(start, end))
2383 			ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
2384 
2385 		if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
2386 			if (!ok) {
2387 				if (start < sc->iobase)
2388 					start = sc->iobase;
2389 				if (end > sc->iolimit)
2390 					end = sc->iolimit;
2391 				if (start < end)
2392 					ok = 1;
2393 			}
2394 		} else {
2395 			ok = 1;
2396 #if 0
2397 			/*
2398 			 * If we overlap with the subtractive range, then
2399 			 * pick the upper range to use.
2400 			 */
2401 			if (start < sc->iolimit && end > sc->iobase)
2402 				start = sc->iolimit + 1;
2403 #endif
2404 		}
2405 		if (end < start) {
2406 			device_printf(dev, "ioport: end (%jx) < start (%jx)\n",
2407 			    end, start);
2408 			start = 0;
2409 			end = 0;
2410 			ok = 0;
2411 		}
2412 		if (!ok) {
2413 			device_printf(dev, "%s%srequested unsupported I/O "
2414 			    "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n",
2415 			    name, suffix, start, end, sc->iobase, sc->iolimit);
2416 			return (NULL);
2417 		}
2418 		if (bootverbose)
2419 			device_printf(dev,
2420 			    "%s%srequested I/O range 0x%jx-0x%jx: in range\n",
2421 			    name, suffix, start, end);
2422 		break;
2423 
2424 	case SYS_RES_MEMORY:
2425 		ok = 0;
2426 		if (pcib_is_nonprefetch_open(sc))
2427 			ok = ok || (start >= sc->membase && end <= sc->memlimit);
2428 		if (pcib_is_prefetch_open(sc))
2429 			ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit);
2430 
2431 		/*
2432 		 * Make sure we allow access to VGA memory addresses when the
2433 		 * bridge has the "VGA Enable" bit set.
2434 		 */
2435 		if (!ok && pci_is_vga_memory_range(start, end))
2436 			ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
2437 
2438 		if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
2439 			if (!ok) {
2440 				ok = 1;
2441 				if (flags & RF_PREFETCHABLE) {
2442 					if (pcib_is_prefetch_open(sc)) {
2443 						if (start < sc->pmembase)
2444 							start = sc->pmembase;
2445 						if (end > sc->pmemlimit)
2446 							end = sc->pmemlimit;
2447 					} else {
2448 						ok = 0;
2449 					}
2450 				} else {	/* non-prefetchable */
2451 					if (pcib_is_nonprefetch_open(sc)) {
2452 						if (start < sc->membase)
2453 							start = sc->membase;
2454 						if (end > sc->memlimit)
2455 							end = sc->memlimit;
2456 					} else {
2457 						ok = 0;
2458 					}
2459 				}
2460 			}
2461 		} else if (!ok) {
2462 			ok = 1;	/* subtractive bridge: always ok */
2463 #if 0
2464 			if (pcib_is_nonprefetch_open(sc)) {
2465 				if (start < sc->memlimit && end > sc->membase)
2466 					start = sc->memlimit + 1;
2467 			}
2468 			if (pcib_is_prefetch_open(sc)) {
2469 				if (start < sc->pmemlimit && end > sc->pmembase)
2470 					start = sc->pmemlimit + 1;
2471 			}
2472 #endif
2473 		}
2474 		if (end < start) {
2475 			device_printf(dev, "memory: end (%jx) < start (%jx)\n",
2476 			    end, start);
2477 			start = 0;
2478 			end = 0;
2479 			ok = 0;
2480 		}
2481 		if (!ok && bootverbose)
2482 			device_printf(dev,
2483 			    "%s%srequested unsupported memory range %#jx-%#jx "
2484 			    "(decoding %#jx-%#jx, %#jx-%#jx)\n",
2485 			    name, suffix, start, end,
2486 			    (uintmax_t)sc->membase, (uintmax_t)sc->memlimit,
2487 			    (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
2488 		if (!ok)
2489 			return (NULL);
2490 		if (bootverbose)
2491 			device_printf(dev,"%s%srequested memory range "
2492 			    "0x%jx-0x%jx: good\n",
2493 			    name, suffix, start, end);
2494 		break;
2495 
2496 	default:
2497 		break;
2498 	}
2499 	/*
2500 	 * Bridge is OK decoding this resource, so pass it up.
2501 	 */
2502 	return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
2503 	    count, flags));
2504 }
2505 #endif
2506 
2507 /*
2508  * If ARI is enabled on this downstream port, translate the function number
2509  * to the non-ARI slot/function.  The downstream port will convert it back in
2510  * hardware.  If ARI is not enabled slot and func are not modified.
2511  * hardware.  If ARI is not enabled, slot and func are not modified.
2512 static __inline void
2513 pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func)
2514 {
2515 	struct pcib_softc *sc;
2516 	int ari_func;
2517 
2518 	sc = device_get_softc(pcib);
2519 	ari_func = *func;
2520 
2521 	if (sc->flags & PCIB_ENABLE_ARI) {
2522 		KASSERT(*slot == 0,
2523 		    ("Non-zero slot number with ARI enabled!"));
2524 		*slot = PCIE_ARI_SLOT(ari_func);
2525 		*func = PCIE_ARI_FUNC(ari_func);
2526 	}
2527 }
2528 
2529 
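/*
 * Enable ARI forwarding on the downstream port by setting the ARI
 * Forwarding Enable bit in the PCI Express Device Control 2 register.
 */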
2530 static void
2531 pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos)
2532 {
2533 	uint32_t ctl2;
2534 
2535 	ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4);
2536 	ctl2 |= PCIEM_CTL2_ARI;
2537 	pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4);
2538 
2539 	sc->flags |= PCIB_ENABLE_ARI;
2540 }
2541 
2542 /*
2543  * PCIB interface.
2544  */
2545 int
2546 pcib_maxslots(device_t dev)
2547 {
2548 	return (PCI_SLOTMAX);
2549 }
2550 
2551 static int
2552 pcib_ari_maxslots(device_t dev)
2553 {
2554 	struct pcib_softc *sc;
2555 
2556 	sc = device_get_softc(dev);
2557 
2558 	if (sc->flags & PCIB_ENABLE_ARI)
2559 		return (PCIE_ARI_SLOTMAX);
2560 	else
2561 		return (PCI_SLOTMAX);
2562 }
2563 
2564 static int
2565 pcib_ari_maxfuncs(device_t dev)
2566 {
2567 	struct pcib_softc *sc;
2568 
2569 	sc = device_get_softc(dev);
2570 
2571 	if (sc->flags & PCIB_ENABLE_ARI)
2572 		return (PCIE_ARI_FUNCMAX);
2573 	else
2574 		return (PCI_FUNCMAX);
2575 }
2576 
2577 static void
2578 pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot,
2579     int *func)
2580 {
2581 	struct pcib_softc *sc;
2582 
2583 	sc = device_get_softc(pcib);
2584 
2585 	*bus = PCI_RID2BUS(rid);
2586 	if (sc->flags & PCIB_ENABLE_ARI) {
2587 		*slot = PCIE_ARI_RID2SLOT(rid);
2588 		*func = PCIE_ARI_RID2FUNC(rid);
2589 	} else {
2590 		*slot = PCI_RID2SLOT(rid);
2591 		*func = PCI_RID2FUNC(rid);
2592 	}
2593 }
2594 
2595 /*
2596  * Since we are a child of a PCI bus, its parent must support the pcib interface.
2597  */
2598 static uint32_t
2599 pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width)
2600 {
2601 #ifdef PCI_HP
2602 	struct pcib_softc *sc;
2603 
2604 	sc = device_get_softc(dev);
2605 	if (!pcib_present(sc)) {
2606 		switch (width) {
2607 		case 2:
2608 			return (0xffff);
2609 		case 1:
2610 			return (0xff);
2611 		default:
2612 			return (0xffffffff);
2613 		}
2614 	}
2615 #endif
2616 	pcib_xlate_ari(dev, b, &s, &f);
2617 	return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s,
2618 	    f, reg, width));
2619 }
2620 
2621 static void
2622 pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width)
2623 {
2624 #ifdef PCI_HP
2625 	struct pcib_softc *sc;
2626 
2627 	sc = device_get_softc(dev);
2628 	if (!pcib_present(sc))
2629 		return;
2630 #endif
2631 	pcib_xlate_ari(dev, b, &s, &f);
2632 	PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f,
2633 	    reg, val, width);
2634 }
2635 
2636 /*
2637  * Route an interrupt across a PCI bridge.
2638  */
2639 int
2640 pcib_route_interrupt(device_t pcib, device_t dev, int pin)
2641 {
2642     device_t	bus;
2643     int		parent_intpin;
2644     int		intnum;
2645 
2646     /*
2647      *
2648      * The PCI standard defines a swizzle of the child-side device/intpin to
2649      * the parent-side intpin as follows.
2650      *
2651      * device = device on child bus
2652      * child_intpin = intpin on child bus slot (0-3)
2653      * parent_intpin = intpin on parent bus slot (0-3)
2654      *
2655      * parent_intpin = (device + child_intpin) % 4
2656      */
2657     parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4;
2658 
2659     /*
2660      * Our parent is a PCI bus.  Its parent must export the pcib interface
2661      * which includes the ability to route interrupts.
2662      */
2663     bus = device_get_parent(pcib);
2664     intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1);
2665     if (PCI_INTERRUPT_VALID(intnum) && bootverbose) {
2666 	device_printf(pcib, "slot %d INT%c is routed to irq %d\n",
2667 	    pci_get_slot(dev), 'A' + pin - 1, intnum);
2668     }
2669     return(intnum);
2670 }
2671 
2672 /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */
2673 int
2674 pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs)
2675 {
2676 	struct pcib_softc *sc = device_get_softc(pcib);
2677 	device_t bus;
2678 
2679 	if (sc->flags & PCIB_DISABLE_MSI)
2680 		return (ENXIO);
2681 	bus = device_get_parent(pcib);
2682 	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
2683 	    irqs));
2684 }
2685 
2686 /* Pass request to release MSI/MSI-X messages up to the parent bridge. */
2687 int
2688 pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs)
2689 {
2690 	device_t bus;
2691 
2692 	bus = device_get_parent(pcib);
2693 	return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
2694 }
2695 
2696 /* Pass request to alloc an MSI-X message up to the parent bridge. */
2697 int
2698 pcib_alloc_msix(device_t pcib, device_t dev, int *irq)
2699 {
2700 	struct pcib_softc *sc = device_get_softc(pcib);
2701 	device_t bus;
2702 
2703 	if (sc->flags & PCIB_DISABLE_MSIX)
2704 		return (ENXIO);
2705 	bus = device_get_parent(pcib);
2706 	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
2707 }
2708 
2709 /* Pass request to release an MSI-X message up to the parent bridge. */
2710 int
2711 pcib_release_msix(device_t pcib, device_t dev, int irq)
2712 {
2713 	device_t bus;
2714 
2715 	bus = device_get_parent(pcib);
2716 	return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
2717 }
2718 
2719 /* Pass request to map MSI/MSI-X message up to parent bridge. */
2720 int
2721 pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr,
2722     uint32_t *data)
2723 {
2724 	device_t bus;
2725 	int error;
2726 
2727 	bus = device_get_parent(pcib);
2728 	error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data);
2729 	if (error)
2730 		return (error);
2731 
2732 	pci_ht_map_msi(pcib, *addr);
2733 	return (0);
2734 }
2735 
2736 /* Pass request for device power state up to parent bridge. */
2737 int
2738 pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate)
2739 {
2740 	device_t bus;
2741 
2742 	bus = device_get_parent(pcib);
2743 	return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate));
2744 }
2745 
2746 static int
2747 pcib_ari_enabled(device_t pcib)
2748 {
2749 	struct pcib_softc *sc;
2750 
2751 	sc = device_get_softc(pcib);
2752 
2753 	return ((sc->flags & PCIB_ENABLE_ARI) != 0);
2754 }
2755 
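/*
 * Compute the requester ID (RID) for a child device, using the ARI
 * bus/function layout when ARI forwarding is enabled.  Other ID types
 * are passed up to the parent bridge.
 */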
2756 static int
2757 pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type,
2758     uintptr_t *id)
2759 {
2760 	struct pcib_softc *sc;
2761 	device_t bus_dev;
2762 	uint8_t bus, slot, func;
2763 
2764 	if (type != PCI_ID_RID) {
2765 		bus_dev = device_get_parent(pcib);
2766 		return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id));
2767 	}
2768 
2769 	sc = device_get_softc(pcib);
2770 
2771 	if (sc->flags & PCIB_ENABLE_ARI) {
2772 		bus = pci_get_bus(dev);
2773 		func = pci_get_function(dev);
2774 
2775 		*id = (PCI_ARI_RID(bus, func));
2776 	} else {
2777 		bus = pci_get_bus(dev);
2778 		slot = pci_get_slot(dev);
2779 		func = pci_get_function(dev);
2780 
2781 		*id = (PCI_RID(bus, slot, func));
2782 	}
2783 
2784 	return (0);
2785 }
2786 
2787 /*
2788  * Check that the downstream port (pcib) and the endpoint device (dev) both
2789  * support ARI.  If so, enable it and return 0, otherwise return an error.
2790  */
2791 static int
2792 pcib_try_enable_ari(device_t pcib, device_t dev)
2793 {
2794 	struct pcib_softc *sc;
2795 	int error;
2796 	uint32_t cap2;
2797 	int ari_cap_off;
2798 	uint32_t ari_ver;
2799 	uint32_t pcie_pos;
2800 
2801 	sc = device_get_softc(pcib);
2802 
2803 	/*
2804 	 * ARI is controlled in a register in the PCIe capability structure.
2805 	 * If the downstream port does not have the PCIe capability structure
2806 	 * then it does not support ARI.
2807 	 */
2808 	error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos);
2809 	if (error != 0)
2810 		return (ENODEV);
2811 
2812 	/* Check that the PCIe port advertises ARI support. */
2813 	cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4);
2814 	if (!(cap2 & PCIEM_CAP2_ARI))
2815 		return (ENODEV);
2816 
2817 	/*
2818 	 * Check that the endpoint device advertises ARI support via the ARI
2819 	 * extended capability structure.
2820 	 */
2821 	error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off);
2822 	if (error != 0)
2823 		return (ENODEV);
2824 
2825 	/*
2826 	 * Finally, check that the endpoint device supports the same version
2827 	 * of ARI that we do.
2828 	 */
2829 	ari_ver = pci_read_config(dev, ari_cap_off, 4);
2830 	if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) {
2831 		if (bootverbose)
2832 			device_printf(pcib,
2833 			    "Unsupported version of ARI (%d) detected\n",
2834 			    PCI_EXTCAP_VER(ari_ver));
2835 
2836 		return (ENXIO);
2837 	}
2838 
2839 	pcib_enable_ari(sc, pcie_pos);
2840 
2841 	return (0);
2842 }
2843 
2844 int
2845 pcib_request_feature_allow(device_t pcib, device_t dev,
2846     enum pci_feature feature)
2847 {
2848 	/*
2849 	 * There is no host firmware we have to negotiate with, so we allow
2850 	 * every valid feature requested.
2851 	 */
2852 	switch (feature) {
2853 	case PCI_FEATURE_AER:
2854 	case PCI_FEATURE_HP:
2855 		break;
2856 	default:
2857 		return (EINVAL);
2858 	}
2859 
2860 	return (0);
2861 }
2862 
2863 int
2864 pcib_request_feature(device_t dev, enum pci_feature feature)
2865 {
2866 
2867 	/*
2868 	 * Invoke PCIB_REQUEST_FEATURE of this bridge first in case
2869 	 * the firmware overrides the method of PCI-PCI bridges.
2870 	 */
2871 	return (PCIB_REQUEST_FEATURE(dev, dev, feature));
2872 }
2873 
2874 /*
2875  * Pass the request to use this PCI feature up the tree. Either there's a
2876  * firmware like ACPI that's using this feature and will approve (or deny) the
2877  * request to take it over, or the platform has no such firmware, in which case
2878  * the request will be approved. If the request is approved, the OS is expected
2879  * to make use of the feature or render it harmless.
2880  */
2881 static int
2882 pcib_request_feature_default(device_t pcib, device_t dev,
2883     enum pci_feature feature)
2884 {
2885 	device_t bus;
2886 
2887 	/*
2888 	 * Our parent is necessarily a pci bus. Its parent will either be
2889 	 * another pci bridge (which passes it up) or a host bridge that can
2890 	 * approve or reject the request.
2891 	 */
2892 	bus = device_get_parent(pcib);
2893 	return (PCIB_REQUEST_FEATURE(device_get_parent(bus), dev, feature));
2894 }
2895