/*-
 * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier
 * Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * PCI:PCI bridge support.
 */

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pcib_private.h>

#include "pcib_if.h"

static int	pcib_probe(device_t dev);
static int	pcib_suspend(device_t dev);
static int	pcib_resume(device_t dev);
static int	pcib_power_for_sleep(device_t pcib, device_t dev,
		    int *pstate);
static uint16_t	pcib_ari_get_rid(device_t pcib, device_t dev);
static uint32_t	pcib_read_config(device_t dev, u_int b, u_int s,
		    u_int f, u_int reg, int width);
static void	pcib_write_config(device_t dev, u_int b, u_int s,
		    u_int f, u_int reg, uint32_t val, int width);
static int	pcib_ari_maxslots(device_t dev);
static int	pcib_ari_maxfuncs(device_t dev);
static int	pcib_try_enable_ari(device_t pcib, device_t dev);
static int	pcib_ari_enabled(device_t pcib);
static void	pcib_ari_decode_rid(device_t pcib, uint16_t rid,
		    int *bus, int *slot, int *func);
#ifdef PCI_HP
static void	pcib_pcie_ab_timeout(void *arg);
static void	pcib_pcie_cc_timeout(void *arg);
static void	pcib_pcie_dll_timeout(void *arg);
#endif

static device_method_t pcib_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, pcib_probe),
	DEVMETHOD(device_attach, pcib_attach),
	DEVMETHOD(device_detach, bus_generic_detach),
	DEVMETHOD(device_shutdown, bus_generic_shutdown),
	DEVMETHOD(device_suspend, pcib_suspend),
	DEVMETHOD(device_resume, pcib_resume),

	/* Bus interface */
	DEVMETHOD(bus_child_present, pcib_child_present),
	DEVMETHOD(bus_read_ivar, pcib_read_ivar),
	DEVMETHOD(bus_write_ivar, pcib_write_ivar),
	DEVMETHOD(bus_alloc_resource, pcib_alloc_resource),
#ifdef NEW_PCIB
	DEVMETHOD(bus_adjust_resource, pcib_adjust_resource),
	DEVMETHOD(bus_release_resource, pcib_release_resource),
#else
	DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
	DEVMETHOD(bus_release_resource, bus_generic_release_resource),
#endif
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots, pcib_ari_maxslots),
	DEVMETHOD(pcib_maxfuncs, pcib_ari_maxfuncs),
	DEVMETHOD(pcib_read_config, pcib_read_config),
	DEVMETHOD(pcib_write_config, pcib_write_config),
	DEVMETHOD(pcib_route_interrupt, pcib_route_interrupt),
	DEVMETHOD(pcib_alloc_msi, pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi, pcib_release_msi),
	DEVMETHOD(pcib_alloc_msix, pcib_alloc_msix),
	DEVMETHOD(pcib_release_msix, pcib_release_msix),
	DEVMETHOD(pcib_map_msi, pcib_map_msi),
	DEVMETHOD(pcib_power_for_sleep, pcib_power_for_sleep),
	DEVMETHOD(pcib_get_rid, pcib_ari_get_rid),
	DEVMETHOD(pcib_try_enable_ari, pcib_try_enable_ari),
	DEVMETHOD(pcib_ari_enabled, pcib_ari_enabled),
	DEVMETHOD(pcib_decode_rid, pcib_ari_decode_rid),

	DEVMETHOD_END
};

static devclass_t pcib_devclass;

DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc));
DRIVER_MODULE(pcib, pci, pcib_driver, pcib_devclass, NULL, NULL);

#ifdef NEW_PCIB
SYSCTL_DECL(_hw_pci);

static int pci_clear_pcib;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0,
    "Clear firmware-assigned resources for PCI-PCI bridge I/O windows.");

/*
 * Is a resource from a child device sub-allocated from one of our
 * resource managers?
 */
static int
pcib_is_resource_managed(struct pcib_softc *sc, int type, struct resource *r)
{

	switch (type) {
#ifdef PCI_RES_BUS
	case PCI_RES_BUS:
		return (rman_is_region_manager(r, &sc->bus.rman));
#endif
	case SYS_RES_IOPORT:
		return (rman_is_region_manager(r, &sc->io.rman));
	case SYS_RES_MEMORY:
		/* Prefetchable resources may live in either memory rman. */
		if (rman_get_flags(r) & RF_PREFETCHABLE &&
		    rman_is_region_manager(r, &sc->pmem.rman))
			return (1);
		return (rman_is_region_manager(r, &sc->mem.rman));
	}
	return (0);
}

static int
pcib_is_window_open(struct pcib_window *pw)
{

	return (pw->valid && pw->base < pw->limit);
}
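
/*
 * A closed window is represented by a base above its limit (for example,
 * pcib_bridge_init() leaves the I/O window with a base of 0xfffff000 and a
 * limit of 0xfff), so a simple base/limit comparison on windows that were
 * detected as implemented is sufficient here.
 */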

/*
 * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and
 * handle for the resource, we could pass RF_ACTIVE up to the PCI bus
 * when allocating the resource windows and rely on the PCI bus driver
 * to do this for us.
 */
static void
pcib_activate_window(struct pcib_softc *sc, int type)
{

	PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type);
}

static void
pcib_write_windows(struct pcib_softc *sc, int mask)
{
	device_t dev;
	uint32_t val;

	dev = sc->dev;
	if (sc->io.valid && mask & WIN_IO) {
		val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
			pci_write_config(dev, PCIR_IOBASEH_1,
			    sc->io.base >> 16, 2);
			pci_write_config(dev, PCIR_IOLIMITH_1,
			    sc->io.limit >> 16, 2);
		}
		pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1);
		pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1);
	}

	if (mask & WIN_MEM) {
		pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2);
		pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2);
	}

	if (sc->pmem.valid && mask & WIN_PMEM) {
		val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
			pci_write_config(dev, PCIR_PMBASEH_1,
			    sc->pmem.base >> 32, 4);
			pci_write_config(dev, PCIR_PMLIMITH_1,
			    sc->pmem.limit >> 32, 4);
		}
		pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2);
		pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2);
	}
}

/*
 * This is used to reject I/O port allocations that conflict with an
 * ISA alias range.
 */
static int
pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end,
    rman_res_t count)
{
	rman_res_t next_alias;

	if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE))
		return (0);

	/* Only check fixed ranges for overlap. */
	if (start + count - 1 != end)
		return (0);

	/* ISA aliases are only in the lower 64KB of I/O space. */
	if (start >= 65536)
		return (0);

	/* Check for overlap with 0x000 - 0x0ff as a special case. */
	if (start < 0x100)
		goto alias;

	/*
	 * If the start address is an alias, the range is an alias.
	 * Otherwise, compute the start of the next alias range and
	 * check if it is before the end of the candidate range.
	 */
	if ((start & 0x300) != 0)
		goto alias;
	next_alias = (start & ~0x3fful) | 0x100;
	if (next_alias <= end)
		goto alias;
	return (0);

alias:
	if (bootverbose)
		device_printf(sc->dev,
		    "I/O range %#jx-%#jx overlaps with an ISA alias\n", start,
		    end);
	return (1);
}

static void
pcib_add_window_resources(struct pcib_window *w, struct resource **res,
    int count)
{
	struct resource **newarray;
	int error, i;

	newarray = malloc(sizeof(struct resource *) * (w->count + count),
	    M_DEVBUF, M_WAITOK);
	if (w->res != NULL)
		bcopy(w->res, newarray, sizeof(struct resource *) * w->count);
	bcopy(res, newarray + w->count, sizeof(struct resource *) * count);
	free(w->res, M_DEVBUF);
	w->res = newarray;
	w->count += count;

	for (i = 0; i < count; i++) {
		error = rman_manage_region(&w->rman, rman_get_start(res[i]),
		    rman_get_end(res[i]));
		if (error)
			panic("Failed to add resource to rman");
	}
}
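
/*
 * With the bridge's ISA enable bit set, only the first 256 bytes of each
 * 1KB block below 64KB (0x0000-0x00ff, 0x0400-0x04ff, 0x0800-0x08ff, ...)
 * are forwarded to the secondary bus; the 0x100-0x3ff offsets in each block
 * alias legacy ISA registers and are not decoded.  pcib_walk_nonisa_ranges()
 * invokes the callback once for each forwarded sub-range of [start, end];
 * for example, for the range 0x3000-0x37ff it calls cb(0x3000, 0x30ff) and
 * cb(0x3400, 0x34ff).
 */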

typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg);

static void
pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb,
    void *arg)
{
	rman_res_t next_end;

	/*
	 * If start is within an ISA alias range, move up to the start
	 * of the next non-alias range.  As a special case, addresses
	 * in the range 0x000 - 0x0ff should also be skipped since
	 * those are used for various system I/O devices in ISA
	 * systems.
	 */
	if (start <= 65535) {
		if (start < 0x100 || (start & 0x300) != 0) {
			start &= ~0x3ff;
			start += 0x400;
		}
	}

	/* ISA aliases are only in the lower 64KB of I/O space. */
	while (start <= MIN(end, 65535)) {
		next_end = MIN(start | 0xff, end);
		cb(start, next_end, arg);
		start += 0x400;
	}

	if (start <= end)
		cb(start, end, arg);
}

static void
count_ranges(rman_res_t start, rman_res_t end, void *arg)
{
	int *countp;

	countp = arg;
	(*countp)++;
}

struct alloc_state {
	struct resource **res;
	struct pcib_softc *sc;
	int count, error;
};

static void
alloc_ranges(rman_res_t start, rman_res_t end, void *arg)
{
	struct alloc_state *as;
	struct pcib_window *w;
	int rid;

	as = arg;
	if (as->error != 0)
		return;

	w = &as->sc->io;
	rid = w->reg;
	if (bootverbose)
		device_printf(as->sc->dev,
		    "allocating non-ISA range %#jx-%#jx\n", start, end);
	as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT,
	    &rid, start, end, end - start + 1, 0);
	if (as->res[as->count] == NULL)
		as->error = ENXIO;
	else
		as->count++;
}

static int
pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end)
{
	struct alloc_state as;
	int i, new_count;

	/* First, see how many ranges we need. */
	new_count = 0;
	pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count);

	/* Second, allocate the ranges. */
	as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF,
	    M_WAITOK);
	as.sc = sc;
	as.count = 0;
	as.error = 0;
	pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as);
	if (as.error != 0) {
		for (i = 0; i < as.count; i++)
			bus_release_resource(sc->dev, SYS_RES_IOPORT,
			    sc->io.reg, as.res[i]);
		free(as.res, M_DEVBUF);
		return (as.error);
	}
	KASSERT(as.count == new_count, ("%s: count mismatch", __func__));

	/* Third, add the ranges to the window. */
	pcib_add_window_resources(&sc->io, as.res, as.count);
	free(as.res, M_DEVBUF);
	return (0);
}
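
/*
 * Set up the resource manager for one bridge window and, if the firmware
 * left the window decoding a range, allocate that range from the parent
 * bus and seed the rman with it so that child BARs can be suballocated
 * from the window.
 */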
static void
pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    int flags, pci_addr_t max_address)
{
	struct resource *res;
	char buf[64];
	int error, rid;

	if (max_address != (rman_res_t)max_address)
		max_address = ~0;
	w->rman.rm_start = 0;
	w->rman.rm_end = max_address;
	w->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s %s window",
	    device_get_nameunit(sc->dev), w->name);
	w->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&w->rman);
	if (error)
		panic("Failed to initialize %s %s rman",
		    device_get_nameunit(sc->dev), w->name);

	if (!pcib_is_window_open(w))
		return;

	if (w->base > max_address || w->limit > max_address) {
		device_printf(sc->dev,
		    "initial %s window has too many bits, ignoring\n", w->name);
		return;
	}
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE)
		(void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit);
	else {
		rid = w->reg;
		res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit,
		    w->limit - w->base + 1, flags);
		if (res != NULL)
			pcib_add_window_resources(w, &res, 1);
	}
	if (w->res == NULL) {
		device_printf(sc->dev,
		    "failed to allocate initial %s window: %#jx-%#jx\n",
		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
		w->base = max_address;
		w->limit = 0;
		pcib_write_windows(sc, w->mask);
		return;
	}
	pcib_activate_window(sc, type);
}
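
/*
 * The window registers encode only the high bits of each range: the I/O
 * base/limit registers hold address bits 15:12 in their upper nibble (with
 * the low nibble reporting 16- vs 32-bit decoding), and the memory
 * base/limit registers hold address bits 31:20.  For example, an I/O base
 * register value of 0xf1 means 32-bit decoding with a base of 0xf000, and a
 * memory base register value of 0xfff0 corresponds to a base of 0xfff00000.
 * The PCI_PPBIOBASE()/PCI_PPBMEMBASE() and matching limit macros perform
 * these conversions below.
 */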

/*
 * Initialize I/O windows.
 */
static void
pcib_probe_windows(struct pcib_softc *sc)
{
	pci_addr_t max;
	device_t dev;
	uint32_t val;

	dev = sc->dev;

	if (pci_clear_pcib) {
		pcib_bridge_init(dev);
	}

	/* Determine if the I/O port window is implemented. */
	val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
	if (val == 0) {
		/*
		 * If 'val' is zero, then only 16-bits of I/O space
		 * are supported.
		 */
		pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
		if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) {
			sc->io.valid = 1;
			pci_write_config(dev, PCIR_IOBASEL_1, 0, 1);
		}
	} else
		sc->io.valid = 1;

	/* Read the existing I/O port window. */
	if (sc->io.valid) {
		sc->io.reg = PCIR_IOBASEL_1;
		sc->io.step = 12;
		sc->io.mask = WIN_IO;
		sc->io.name = "I/O port";
		if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
			sc->io.base = PCI_PPBIOBASE(
			    pci_read_config(dev, PCIR_IOBASEH_1, 2), val);
			sc->io.limit = PCI_PPBIOLIMIT(
			    pci_read_config(dev, PCIR_IOLIMITH_1, 2),
			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
			max = 0xffffffff;
		} else {
			sc->io.base = PCI_PPBIOBASE(0, val);
			sc->io.limit = PCI_PPBIOLIMIT(0,
			    pci_read_config(dev, PCIR_IOLIMITL_1, 1));
			max = 0xffff;
		}
		pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max);
	}

	/* Read the existing memory window. */
	sc->mem.valid = 1;
	sc->mem.reg = PCIR_MEMBASE_1;
	sc->mem.step = 20;
	sc->mem.mask = WIN_MEM;
	sc->mem.name = "memory";
	sc->mem.base = PCI_PPBMEMBASE(0,
	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
	sc->mem.limit = PCI_PPBMEMLIMIT(0,
	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
	pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff);

	/* Determine if the prefetchable memory window is implemented. */
	val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
	if (val == 0) {
		/*
		 * If 'val' is zero, then only 32-bits of memory space
		 * are supported.
		 */
		pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
		if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) {
			sc->pmem.valid = 1;
			pci_write_config(dev, PCIR_PMBASEL_1, 0, 2);
		}
	} else
		sc->pmem.valid = 1;

	/* Read the existing prefetchable memory window. */
	if (sc->pmem.valid) {
		sc->pmem.reg = PCIR_PMBASEL_1;
		sc->pmem.step = 20;
		sc->pmem.mask = WIN_PMEM;
		sc->pmem.name = "prefetch";
		if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
			sc->pmem.base = PCI_PPBMEMBASE(
			    pci_read_config(dev, PCIR_PMBASEH_1, 4), val);
			sc->pmem.limit = PCI_PPBMEMLIMIT(
			    pci_read_config(dev, PCIR_PMLIMITH_1, 4),
			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
			max = 0xffffffffffffffff;
		} else {
			sc->pmem.base = PCI_PPBMEMBASE(0, val);
			sc->pmem.limit = PCI_PPBMEMLIMIT(0,
			    pci_read_config(dev, PCIR_PMLIMITL_1, 2));
			max = 0xffffffff;
		}
		pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY,
		    RF_PREFETCHABLE, max);
	}
}
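
/*
 * A bridge claims configuration transactions for every bus number in the
 * range [secondary bus, subordinate bus] and forwards them to its secondary
 * bus; PCIR_SECBUS_1 and PCIR_SUBBUS_1 hold that range.  With NEW_PCIB and
 * PCI_RES_BUS the range is also represented as a PCI_RES_BUS resource so
 * that child bridges can suballocate bus numbers from it.
 */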

#ifdef PCI_RES_BUS
/*
 * Allocate a suitable secondary bus for this bridge if needed and
 * initialize the resource manager for the secondary bus range.  Note
 * that the minimum count is a desired value and this may allocate a
 * smaller range.
 */
void
pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count)
{
	char buf[64];
	int error, rid, sec_reg;

	switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_BRIDGE:
		sec_reg = PCIR_SECBUS_1;
		bus->sub_reg = PCIR_SUBBUS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		sec_reg = PCIR_SECBUS_2;
		bus->sub_reg = PCIR_SUBBUS_2;
		break;
	default:
		panic("not a PCI bridge");
	}
	bus->sec = pci_read_config(dev, sec_reg, 1);
	bus->sub = pci_read_config(dev, bus->sub_reg, 1);
	bus->dev = dev;
	bus->rman.rm_start = 0;
	bus->rman.rm_end = PCI_BUSMAX;
	bus->rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
	bus->rman.rm_descr = strdup(buf, M_DEVBUF);
	error = rman_init(&bus->rman);
	if (error)
		panic("Failed to initialize %s bus number rman",
		    device_get_nameunit(dev));

	/*
	 * Allocate a bus range.  This will return an existing bus range
	 * if one exists, or a new bus range if one does not.
	 */
	rid = 0;
	bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
	    min_count, 0);
	if (bus->res == NULL) {
		/*
		 * Fall back to just allocating a range of a single bus
		 * number.
		 */
		bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
		    1, 0);
	} else if (rman_get_size(bus->res) < min_count)
		/*
		 * Attempt to grow the existing range to satisfy the
		 * minimum desired count.
		 */
		(void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
		    rman_get_start(bus->res), rman_get_start(bus->res) +
		    min_count - 1);

	/*
	 * Add the initial resource to the rman.
	 */
	if (bus->res != NULL) {
		error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
		    rman_get_end(bus->res));
		if (error)
			panic("Failed to add resource to rman");
		bus->sec = rman_get_start(bus->res);
		bus->sub = rman_get_end(bus->res);
	}
}

static struct resource *
pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;

	res = rman_reserve_resource(&bus->rman, start, end, count, flags,
	    child);
	if (res == NULL)
		return (NULL);

	if (bootverbose)
		device_printf(bus->dev,
		    "allocated bus range (%ju-%ju) for rid %d of %s\n",
		    rman_get_start(res), rman_get_end(res), *rid,
		    pcib_child_name(child));
	rman_set_rid(res, *rid);
	return (res);
}

/*
 * Attempt to grow the secondary bus range.  This is much simpler than
 * for I/O windows as the range can only be grown by increasing
 * subbus.
 */
static int
pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end)
{
	rman_res_t old_end;
	int error;

	old_end = rman_get_end(bus->res);
	KASSERT(new_end > old_end, ("attempt to shrink subbus"));
	error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res,
	    rman_get_start(bus->res), new_end);
	if (error)
		return (error);
	if (bootverbose)
		device_printf(bus->dev, "grew bus range to %ju-%ju\n",
		    rman_get_start(bus->res), rman_get_end(bus->res));
	error = rman_manage_region(&bus->rman, old_end + 1,
	    rman_get_end(bus->res));
	if (error)
		panic("Failed to add resource to rman");
	bus->sub = rman_get_end(bus->res);
	pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1);
	return (0);
}
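
/*
 * Allocate a range of bus numbers for a child bridge, growing the
 * subordinate bus range if the current one is exhausted.  For example, a
 * bridge decoding buses 4-4 that receives a request for two more bus
 * numbers will attempt to adjust its bus resource to 4-6 and rewrite its
 * subordinate bus register to 6 before retrying the suballocation.
 */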
struct resource *
pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	rman_res_t start_free, end_free, new_end;

	/*
	 * First, see if the request can be satisfied by the existing
	 * bus range.
	 */
	res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags);
	if (res != NULL)
		return (res);

	/*
	 * Figure out a range to grow the bus range.  First, find the
	 * first bus number after the last allocated bus in the rman and
	 * enforce that as a minimum starting point for the range.
	 */
	if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 ||
	    end_free != bus->sub)
		start_free = bus->sub + 1;
	if (start_free < start)
		start_free = start;
	new_end = start_free + count - 1;

	/*
	 * See if this new range would satisfy the request if it
	 * succeeds.
	 */
	if (new_end > end)
		return (NULL);

	/* Finally, attempt to grow the existing resource. */
	if (bootverbose) {
		device_printf(bus->dev,
		    "attempting to grow bus range for %ju buses\n", count);
		printf("\tback candidate range: %ju-%ju\n", start_free,
		    new_end);
	}
	if (pcib_grow_subbus(bus, new_end) == 0)
		return (pcib_suballoc_bus(bus, child, rid, start, end, count,
		    flags));
	return (NULL);
}
#endif

#else

/*
 * Is the prefetch window open (e.g., can we allocate memory in it?)
 */
static int
pcib_is_prefetch_open(struct pcib_softc *sc)
{
	return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
}

/*
 * Is the nonprefetch window open (e.g., can we allocate memory in it?)
 */
static int
pcib_is_nonprefetch_open(struct pcib_softc *sc)
{
	return (sc->membase > 0 && sc->membase < sc->memlimit);
}

/*
 * Is the I/O window open (e.g., can we allocate ports in it?)
 */
static int
pcib_is_io_open(struct pcib_softc *sc)
{
	return (sc->iobase > 0 && sc->iobase < sc->iolimit);
}

/*
 * Get current I/O decode.
 */
static void
pcib_get_io_decode(struct pcib_softc *sc)
{
	device_t dev;
	uint32_t iolow;

	dev = sc->dev;

	iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iobase = PCI_PPBIOBASE(
		    pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
	else
		sc->iobase = PCI_PPBIOBASE(0, iolow);

	iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
	if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
		sc->iolimit = PCI_PPBIOLIMIT(
		    pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
	else
		sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
}

/*
 * Get current memory decode.
 */
static void
pcib_get_mem_decode(struct pcib_softc *sc)
{
	device_t dev;
	pci_addr_t pmemlow;

	dev = sc->dev;

	sc->membase = PCI_PPBMEMBASE(0,
	    pci_read_config(dev, PCIR_MEMBASE_1, 2));
	sc->memlimit = PCI_PPBMEMLIMIT(0,
	    pci_read_config(dev, PCIR_MEMLIMIT_1, 2));

	pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2);
	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
		sc->pmembase = PCI_PPBMEMBASE(
		    pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow);
	else
		sc->pmembase = PCI_PPBMEMBASE(0, pmemlow);

	pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2);
	if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
		sc->pmemlimit = PCI_PPBMEMLIMIT(
		    pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow);
	else
		sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow);
}

/*
 * Restore previous I/O decode.
 */
static void
pcib_set_io_decode(struct pcib_softc *sc)
{
	device_t dev;
	uint32_t iohi;

	dev = sc->dev;

	iohi = sc->iobase >> 16;
	if (iohi > 0)
		pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2);
	pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1);

	iohi = sc->iolimit >> 16;
	if (iohi > 0)
		pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2);
	pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1);
}

/*
 * Restore previous memory decode.
 */
static void
pcib_set_mem_decode(struct pcib_softc *sc)
{
	device_t dev;
	pci_addr_t pmemhi;

	dev = sc->dev;

	pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2);
	pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2);

	pmemhi = sc->pmembase >> 32;
	if (pmemhi > 0)
		pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4);
	pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2);

	pmemhi = sc->pmemlimit >> 32;
	if (pmemhi > 0)
		pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4);
	pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2);
}
#endif

#ifdef PCI_HP
/*
 * PCI-express HotPlug support.
 */
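/*
 * Slot events (attention button, presence detect, power fault, MRL, data
 * link layer state and command completion) arrive through the interrupt
 * handler below, which latches Slot Status and then runs
 * pcib_pcie_hotplug_update() to drive the slot's indicators, power
 * controller and interlock and to attach or detach the child "pci" bus.
 * Writes to Slot Control are serialized: while a command is completing,
 * further updates are buffered in pcie_pending_link_ctl_val/mask and issued
 * from pcib_pcie_hotplug_command_completed(), with callouts providing
 * timeouts for command completion and data link activation.
 */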

static void
pcib_probe_hotplug(struct pcib_softc *sc)
{
	device_t dev;

	dev = sc->dev;
	if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0)
		return;

	if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT))
		return;

	sc->pcie_link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
	sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4);

	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC)
		sc->flags |= PCIB_HOTPLUG;
}

/*
 * Send a HotPlug command to the slot control register.  If this slot
 * uses command completion interrupts, these updates will be buffered
 * while a previous command is completing.
 */
static void
pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask)
{
	device_t dev;
	uint16_t ctl, new;

	dev = sc->dev;
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) {
		ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
		new = (ctl & ~mask) | val;
		if (new != ctl)
			pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
		return;
	}

	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
		sc->pcie_pending_link_ctl_val &= ~mask;
		sc->pcie_pending_link_ctl_val |= val;
		sc->pcie_pending_link_ctl_mask |= mask;
	} else {
		ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
		new = (ctl & ~mask) | val;
		if (new != ctl) {
			pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
			sc->flags |= PCIB_HOTPLUG_CMD_PENDING;
			if (!cold)
				callout_reset(&sc->pcie_cc_timer, hz,
				    pcib_pcie_cc_timeout, sc);
		}
	}
}
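
/*
 * Called when Slot Status reports Command Completed: flush any Slot Control
 * update that was buffered while the previous command was in flight, or
 * clear the pending state and stop the completion timer.
 */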
static void
pcib_pcie_hotplug_command_completed(struct pcib_softc *sc)
{
	device_t dev;
	uint16_t ctl, new;

	dev = sc->dev;

	if (bootverbose)
		device_printf(dev, "Command Completed\n");
	if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING))
		return;
	if (sc->pcie_pending_link_ctl_mask != 0) {
		ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
		new = ctl & ~sc->pcie_pending_link_ctl_mask;
		new |= sc->pcie_pending_link_ctl_val;
		if (new != ctl) {
			pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
			if (!cold)
				callout_reset(&sc->pcie_cc_timer, hz,
				    pcib_pcie_cc_timeout, sc);
		} else
			sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
		sc->pcie_pending_link_ctl_mask = 0;
		sc->pcie_pending_link_ctl_val = 0;
	} else {
		callout_stop(&sc->pcie_cc_timer);
		sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
	}
}

/*
 * Returns true if a card is fully inserted from the user's
 * perspective.  It may not yet be ready for access, but the driver
 * can now start enabling access if necessary.
 */
static bool
pcib_hotplug_inserted(struct pcib_softc *sc)
{

	/* Pretend the card isn't present if a detach is forced. */
	if (sc->flags & PCIB_DETACHING)
		return (false);

	/* Card must be present in the slot. */
	if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0)
		return (false);

	/* A power fault implicitly turns off power to the slot. */
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
		return (false);

	/* If the MRL is disengaged, the slot is powered off. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP &&
	    (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0)
		return (false);

	return (true);
}

/*
 * Returns -1 if the card is fully inserted, powered, and ready for
 * access.  Otherwise, returns 0.
 */
static int
pcib_hotplug_present(struct pcib_softc *sc)
{
	device_t dev;

	dev = sc->dev;

	/* Card must be inserted. */
	if (!pcib_hotplug_inserted(sc))
		return (0);

	/*
	 * Require the Electromechanical Interlock to be engaged if
	 * present.
	 */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP &&
	    (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) == 0)
		return (0);

	/* Require the Data Link Layer to be active. */
	if (sc->pcie_link_cap & PCIEM_LINK_CAP_DL_ACTIVE) {
		if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE))
			return (0);
	}

	return (-1);
}

static void
pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
    bool schedule_task)
{
	bool card_inserted;

	/* Clear DETACHING if Present Detect has cleared. */
	if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) ==
	    PCIEM_SLOT_STA_PDC)
		sc->flags &= ~PCIB_DETACHING;

	card_inserted = pcib_hotplug_inserted(sc);

	/* Turn the power indicator on if a card is inserted. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) {
		mask |= PCIEM_SLOT_CTL_PIC;
		if (card_inserted)
			val |= PCIEM_SLOT_CTL_PI_ON;
		else if (sc->flags & PCIB_DETACH_PENDING)
			val |= PCIEM_SLOT_CTL_PI_BLINK;
		else
			val |= PCIEM_SLOT_CTL_PI_OFF;
	}

	/* Turn the power on via the Power Controller if a card is inserted. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) {
		mask |= PCIEM_SLOT_CTL_PCC;
		if (card_inserted)
			val |= PCIEM_SLOT_CTL_PC_ON;
		else
			val |= PCIEM_SLOT_CTL_PC_OFF;
	}

	/*
	 * If a card is inserted, enable the Electromechanical
	 * Interlock.  If a card is not inserted (or we are in the
	 * process of detaching), disable the Electromechanical
	 * Interlock.
	 */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) {
		if (card_inserted !=
		    !(sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS)) {
			mask |= PCIEM_SLOT_CTL_EIC;
			val |= PCIEM_SLOT_CTL_EIC;
		}
	}

	/*
	 * Start a timer to see if the Data Link Layer times out.
	 * Note that we only start the timer if Presence Detect
	 * changed on this interrupt.  Stop any scheduled timer if
	 * the Data Link Layer is active.
	 */
	if (sc->pcie_link_cap & PCIEM_LINK_CAP_DL_ACTIVE) {
		if (card_inserted &&
		    !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) &&
		    sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC) {
			if (cold)
				device_printf(sc->dev,
				    "Data Link Layer inactive\n");
			else
				callout_reset(&sc->pcie_dll_timer, hz,
				    pcib_pcie_dll_timeout, sc);
		} else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)
			callout_stop(&sc->pcie_dll_timer);
	}

	pcib_pcie_hotplug_command(sc, val, mask);

	/*
	 * During attach the child "pci" device is added synchronously;
	 * otherwise, the task is scheduled to manage the child
	 * device.
	 */
	if (schedule_task &&
	    (pcib_hotplug_present(sc) != 0) != (sc->child != NULL))
		taskqueue_enqueue(taskqueue_thread, &sc->pcie_hp_task);
}
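
/*
 * Interrupt handler for HotPlug events: latch and acknowledge Slot Status,
 * report the individual events, and run the update routine to react to the
 * new slot state.
 */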
static void
pcib_pcie_intr(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;

	sc = arg;
	dev = sc->dev;
	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);

	/* Clear the events just reported. */
	pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);

	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) {
		if (sc->flags & PCIB_DETACH_PENDING) {
			device_printf(dev,
			    "Attention Button Pressed: Detach Cancelled\n");
			sc->flags &= ~PCIB_DETACH_PENDING;
			callout_stop(&sc->pcie_ab_timer);
		} else {
			device_printf(dev,
			    "Attention Button Pressed: Detaching in 5 seconds\n");
			sc->flags |= PCIB_DETACH_PENDING;
			callout_reset(&sc->pcie_ab_timer, 5 * hz,
			    pcib_pcie_ab_timeout, sc);
		}
	}
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
		device_printf(dev, "Power Fault Detected\n");
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC)
		device_printf(dev, "MRL Sensor Changed to %s\n",
		    sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" :
		    "closed");
	if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC)
		device_printf(dev, "Present Detect Changed to %s\n",
		    sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" :
		    "empty");
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC)
		pcib_pcie_hotplug_command_completed(sc);
	if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) {
		sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
		if (bootverbose)
			device_printf(dev,
			    "Data Link Layer State Changed to %s\n",
			    sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ?
			    "active" : "inactive");
	}

	pcib_pcie_hotplug_update(sc, 0, 0, true);
}

static void
pcib_pcie_hotplug_task(void *context, int pending)
{
	struct pcib_softc *sc;
	device_t dev;

	sc = context;
	mtx_lock(&Giant);
	dev = sc->dev;
	if (pcib_hotplug_present(sc) != 0) {
		if (sc->child == NULL) {
			sc->child = device_add_child(dev, "pci", -1);
			bus_generic_attach(dev);
		}
	} else {
		if (sc->child != NULL) {
			if (device_delete_child(dev, sc->child) == 0)
				sc->child = NULL;
		}
	}
	mtx_unlock(&Giant);
}

static void
pcib_pcie_ab_timeout(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;

	sc = arg;
	dev = sc->dev;
	mtx_assert(&Giant, MA_OWNED);
	if (sc->flags & PCIB_DETACH_PENDING) {
		sc->flags |= PCIB_DETACHING;
		sc->flags &= ~PCIB_DETACH_PENDING;
		pcib_pcie_hotplug_update(sc, 0, 0, true);
	}
}

static void
pcib_pcie_cc_timeout(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;

	sc = arg;
	dev = sc->dev;
	mtx_assert(&Giant, MA_OWNED);
	if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
		device_printf(dev,
		    "Hotplug Command Timed Out - forcing detach\n");
		sc->flags &= ~(PCIB_HOTPLUG_CMD_PENDING | PCIB_DETACH_PENDING);
		sc->flags |= PCIB_DETACHING;
		pcib_pcie_hotplug_update(sc, 0, 0, true);
	}
}

static void
pcib_pcie_dll_timeout(void *arg)
{
	struct pcib_softc *sc;
	device_t dev;
	uint16_t sta;

	sc = arg;
	dev = sc->dev;
	mtx_assert(&Giant, MA_OWNED);
	sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
	if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) {
		device_printf(dev,
		    "Timed out waiting for Data Link Layer Active\n");
		sc->flags |= PCIB_DETACHING;
		pcib_pcie_hotplug_update(sc, 0, 0, true);
	} else if (sta != sc->pcie_link_sta) {
		device_printf(dev,
		    "Missed HotPlug interrupt waiting for DLL Active\n");
		pcib_pcie_intr(sc);
	}
}
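
/*
 * Allocate and set up the interrupt used for HotPlug events, preferring a
 * single MSI-X or MSI message and falling back to the legacy INTx interrupt
 * (rid 0) when no message-signaled interrupts are available.
 */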
static int
pcib_alloc_pcie_irq(struct pcib_softc *sc)
{
	device_t dev;
	int count, error, rid;

	rid = -1;
	dev = sc->dev;

	/*
	 * For simplicity, only use MSI-X if there is a single message.
	 * To support a device with multiple messages we would have to
	 * use remap intr if the MSI number is not 0.
	 */
	count = pci_msix_count(dev);
	if (count == 1) {
		error = pci_alloc_msix(dev, &count);
		if (error == 0)
			rid = 1;
	}

	if (rid < 0 && pci_msi_count(dev) > 0) {
		count = 1;
		error = pci_alloc_msi(dev, &count);
		if (error == 0)
			rid = 1;
	}

	if (rid < 0)
		rid = 0;

	sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->pcie_irq == NULL) {
		device_printf(dev,
		    "Failed to allocate interrupt for PCI-e events\n");
		if (rid > 0)
			pci_release_msi(dev);
		return (ENXIO);
	}

	error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC,
	    NULL, pcib_pcie_intr, sc, &sc->pcie_ihand);
	if (error) {
		device_printf(dev, "Failed to setup PCI-e interrupt handler\n");
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq);
		if (rid > 0)
			pci_release_msi(dev);
		return (error);
	}
	return (0);
}

static void
pcib_setup_hotplug(struct pcib_softc *sc)
{
	device_t dev;
	uint16_t mask, val;

	dev = sc->dev;
	callout_init(&sc->pcie_ab_timer, 0);
	callout_init(&sc->pcie_cc_timer, 0);
	callout_init(&sc->pcie_dll_timer, 0);
	TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc);

	/* Allocate IRQ. */
	if (pcib_alloc_pcie_irq(sc) != 0)
		return;

	sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
	sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);

	/* Enable HotPlug events. */
	mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
	    PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE |
	    PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE;
	val = PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_HPIE;
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB)
		val |= PCIEM_SLOT_CTL_ABPE;
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP)
		val |= PCIEM_SLOT_CTL_PFDE;
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP)
		val |= PCIEM_SLOT_CTL_MRLSCE;
	if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS))
		val |= PCIEM_SLOT_CTL_CCIE;
	if (sc->pcie_link_cap & PCIEM_LINK_CAP_DL_ACTIVE)
		val |= PCIEM_SLOT_CTL_DLLSCE;

	/* Turn the attention indicator off. */
	if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) {
		mask |= PCIEM_SLOT_CTL_AIC;
		val |= PCIEM_SLOT_CTL_AI_OFF;
	}

	pcib_pcie_hotplug_update(sc, val, mask, false);
}
#endif

/*
 * Get current bridge configuration.
 */
static void
pcib_cfg_save(struct pcib_softc *sc)
{
#ifndef NEW_PCIB
	device_t dev;
	uint16_t command;

	dev = sc->dev;

	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if (command & PCIM_CMD_PORTEN)
		pcib_get_io_decode(sc);
	if (command & PCIM_CMD_MEMEN)
		pcib_get_mem_decode(sc);
#endif
}

/*
 * Restore previous bridge configuration.
 */
static void
pcib_cfg_restore(struct pcib_softc *sc)
{
	device_t dev;
#ifndef NEW_PCIB
	uint16_t command;
#endif
	dev = sc->dev;

#ifdef NEW_PCIB
	pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM);
#else
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if (command & PCIM_CMD_PORTEN)
		pcib_set_io_decode(sc);
	if (command & PCIM_CMD_MEMEN)
		pcib_set_mem_decode(sc);
#endif
}

/*
 * Generic device interface
 */
static int
pcib_probe(device_t dev)
{
	if ((pci_get_class(dev) == PCIC_BRIDGE) &&
	    (pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) {
		device_set_desc(dev, "PCI-PCI bridge");
		return(-10000);
	}
	return(ENXIO);
}

void
pcib_attach_common(device_t dev)
{
	struct pcib_softc *sc;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	int comma;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Get current bridge configuration.
	 */
	sc->domain = pci_get_domain(dev);
#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
	sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1);
	sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
#endif
	sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2);
	pcib_cfg_save(sc);

	/*
	 * The primary bus register should always be the bus of the
	 * parent.
	 */
	sc->pribus = pci_get_bus(dev);
	pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1);

	/*
	 * Setup sysctl reporting nodes
	 */
	sctx = device_get_sysctl_ctx(dev);
	soid = device_get_sysctl_tree(dev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain",
	    CTLFLAG_RD, &sc->domain, 0, "Domain number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus",
	    CTLFLAG_RD, &sc->pribus, 0, "Primary bus number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus",
	    CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus",
	    CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number");

	/*
	 * Quirk handling.
	 */
	switch (pci_get_devid(dev)) {
#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
	case 0x12258086:	/* Intel 82454KX/GX (Orion) */
		{
			uint8_t	supbus;

			supbus = pci_read_config(dev, 0x41, 1);
			if (supbus != 0xff) {
				sc->bus.sec = supbus + 1;
				sc->bus.sub = supbus + 1;
			}
			break;
		}
#endif

		/*
		 * The i82380FB mobile docking controller is a PCI-PCI bridge,
		 * and it is a subtractive bridge.  However, the ProgIf is wrong
		 * so the normal setting of PCIB_SUBTRACTIVE bit doesn't
		 * happen.  There are also Toshiba and Cavium ThunderX bridges
		 * that behave this way.
		 */
	case 0xa002177d:	/* Cavium ThunderX */
	case 0x124b8086:	/* Intel 82380FB Mobile */
	case 0x060513d7:	/* Toshiba ???? */
		sc->flags |= PCIB_SUBTRACTIVE;
		break;

#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
	/* Compaq R3000 BIOS sets wrong subordinate bus number. */
	case 0x00dd10de:
		{
			char *cp;

			if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
				break;
			if (strncmp(cp, "Compal", 6) != 0) {
				freeenv(cp);
				break;
			}
			freeenv(cp);
			if ((cp = kern_getenv("smbios.planar.product")) == NULL)
				break;
			if (strncmp(cp, "08A0", 4) != 0) {
				freeenv(cp);
				break;
			}
			freeenv(cp);
			if (sc->bus.sub < 0xa) {
				pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1);
				sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
			}
			break;
		}
#endif
	}

	if (pci_msi_device_blacklisted(dev))
		sc->flags |= PCIB_DISABLE_MSI;

	if (pci_msix_device_blacklisted(dev))
		sc->flags |= PCIB_DISABLE_MSIX;

	/*
	 * Intel 815, 845 and other chipsets say they are PCI-PCI bridges,
	 * but have a ProgIF of 0x80.  The 82801 family (AA, AB, BAM/CAM,
	 * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese.
	 * This means they act as if they were subtractively decoding
	 * bridges and pass all transactions.  Mark them and real ProgIf 1
	 * parts as subtractive.
	 */
	if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 ||
	    pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE)
		sc->flags |= PCIB_SUBTRACTIVE;

#ifdef PCI_HP
	pcib_probe_hotplug(sc);
#endif
#ifdef NEW_PCIB
#ifdef PCI_RES_BUS
	pcib_setup_secbus(dev, &sc->bus, 1);
#endif
	pcib_probe_windows(sc);
#endif
#ifdef PCI_HP
	if (sc->flags & PCIB_HOTPLUG)
		pcib_setup_hotplug(sc);
#endif
	if (bootverbose) {
		device_printf(dev, " domain %d\n", sc->domain);
		device_printf(dev, " secondary bus %d\n", sc->bus.sec);
		device_printf(dev, " subordinate bus %d\n", sc->bus.sub);
#ifdef NEW_PCIB
		if (pcib_is_window_open(&sc->io))
			device_printf(dev, " I/O decode 0x%jx-0x%jx\n",
			    (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit);
		if (pcib_is_window_open(&sc->mem))
			device_printf(dev, " memory decode 0x%jx-0x%jx\n",
			    (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit);
		if (pcib_is_window_open(&sc->pmem))
			device_printf(dev, " prefetched decode 0x%jx-0x%jx\n",
			    (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit);
#else
		if (pcib_is_io_open(sc))
			device_printf(dev, " I/O decode 0x%x-0x%x\n",
			    sc->iobase, sc->iolimit);
		if (pcib_is_nonprefetch_open(sc))
			device_printf(dev, " memory decode 0x%jx-0x%jx\n",
			    (uintmax_t)sc->membase, (uintmax_t)sc->memlimit);
		if (pcib_is_prefetch_open(sc))
			device_printf(dev, " prefetched decode 0x%jx-0x%jx\n",
			    (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
#endif
		if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) ||
		    sc->flags & PCIB_SUBTRACTIVE) {
			device_printf(dev, " special decode ");
			comma = 0;
			if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) {
				printf("ISA");
				comma = 1;
			}
			if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) {
				printf("%sVGA", comma ? ", " : "");
				comma = 1;
			}
			if (sc->flags & PCIB_SUBTRACTIVE)
				printf("%ssubtractive", comma ? ", " : "");
			printf("\n");
		}
	}

	/*
	 * Always enable busmastering on bridges so that transactions
	 * initiated on the secondary bus are passed through to the
	 * primary bus.
	 */
	pci_enable_busmaster(dev);
}

#ifdef PCI_HP
static int
pcib_present(struct pcib_softc *sc)
{

	if (sc->flags & PCIB_HOTPLUG)
		return (pcib_hotplug_present(sc) != 0);
	return (1);
}
#endif

int
pcib_attach_child(device_t dev)
{
	struct pcib_softc *sc;

	sc = device_get_softc(dev);
	if (sc->bus.sec == 0) {
		/* no secondary bus; we should have fixed this */
		return(0);
	}

#ifdef PCI_HP
	if (!pcib_present(sc)) {
		/* An empty HotPlug slot, so don't add a PCI bus yet. */
		return (0);
	}
#endif

	sc->child = device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

int
pcib_attach(device_t dev)
{

	pcib_attach_common(dev);
	return (pcib_attach_child(dev));
}

int
pcib_suspend(device_t dev)
{

	pcib_cfg_save(device_get_softc(dev));
	return (bus_generic_suspend(dev));
}

int
pcib_resume(device_t dev)
{

	pcib_cfg_restore(device_get_softc(dev));
	return (bus_generic_resume(dev));
}
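
/*
 * Program empty windows (base above limit) into the bridge so that it
 * decodes no I/O port or memory addresses until new windows are written;
 * used when the hw.pci.clear_pcib tunable asks to discard firmware-assigned
 * windows.
 */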
void
pcib_bridge_init(device_t dev)
{
	pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
	pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2);
	pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1);
	pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2);
	pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2);
	pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2);
	pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
	pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4);
	pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2);
	pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4);
}

int
pcib_child_present(device_t dev, device_t child)
{
#ifdef PCI_HP
	struct pcib_softc *sc = device_get_softc(dev);
	int retval;

	retval = bus_child_present(dev);
	if (retval != 0 && sc->flags & PCIB_HOTPLUG)
		retval = pcib_hotplug_present(sc);
	return (retval);
#else
	return (bus_child_present(dev));
#endif
}

int
pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pcib_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_DOMAIN:
		*result = sc->domain;
		return(0);
	case PCIB_IVAR_BUS:
		*result = sc->bus.sec;
		return(0);
	}
	return(ENOENT);
}

int
pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{

	switch (which) {
	case PCIB_IVAR_DOMAIN:
		return(EINVAL);
	case PCIB_IVAR_BUS:
		return(EINVAL);
	}
	return(ENOENT);
}

#ifdef NEW_PCIB
/*
 * Attempt to allocate a resource from the existing resources assigned
 * to a window.
 */
static struct resource *
pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w,
    device_t child, int type, int *rid, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags)
{
	struct resource *res;

	if (!pcib_is_window_open(w))
		return (NULL);

	res = rman_reserve_resource(&w->rman, start, end, count,
	    flags & ~RF_ACTIVE, child);
	if (res == NULL)
		return (NULL);

	if (bootverbose)
		device_printf(sc->dev,
		    "allocated %s range (%#jx-%#jx) for rid %x of %s\n",
		    w->name, rman_get_start(res), rman_get_end(res), *rid,
		    pcib_child_name(child));
	rman_set_rid(res, *rid);

	/*
	 * If the resource should be active, pass that request up the
	 * tree.  This assumes the parent drivers can handle
	 * activating sub-allocated resources.
	 */
	if (flags & RF_ACTIVE) {
		if (bus_activate_resource(child, type, *rid, res) != 0) {
			rman_release_resource(res);
			return (NULL);
		}
	}

	return (res);
}

/* Allocate a fresh resource range for an unconfigured window. */
static int
pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct resource *res;
	rman_res_t base, limit, wmask;
	int rid;

	/*
	 * If this is an I/O window on a bridge with ISA enable set
	 * and the start address is below 64k, then try to allocate an
	 * initial window of 0x1000 bytes long starting at address
	 * 0xf000 and walking down.  Note that if the original request
	 * was larger than the non-aliased range size of 0x100 our
	 * caller would have raised the start address up to 64k
	 * already.
	 */
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
	    start < 65536) {
		for (base = 0xf000; (long)base >= 0; base -= 0x1000) {
			limit = base + 0xfff;

			/*
			 * Skip ranges that wouldn't work for the
			 * original request.  Note that the ranges that
			 * actually overlap are the non-alias ranges
			 * within [base, limit], so this isn't quite a
			 * simple comparison.
			 */
			if (start + count > limit - 0x400)
				continue;
			if (base == 0) {
				/*
				 * The first open region for the window at
				 * 0 is 0x400-0x4ff.
				 */
				if (end - count + 1 < 0x400)
					continue;
			} else {
				if (end - count + 1 < base)
					continue;
			}

			if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) {
				w->base = base;
				w->limit = limit;
				return (0);
			}
		}
		return (ENOSPC);
	}

	wmask = ((rman_res_t)1 << w->step) - 1;
	if (RF_ALIGNMENT(flags) < w->step) {
		flags &= ~RF_ALIGNMENT_MASK;
		flags |= RF_ALIGNMENT_LOG2(w->step);
	}
	start &= ~wmask;
	end |= wmask;
	count = roundup2(count, (rman_res_t)1 << w->step);
	rid = w->reg;
	res = bus_alloc_resource(sc->dev, type, &rid, start, end, count,
	    flags & ~RF_ACTIVE);
	if (res == NULL)
		return (ENOSPC);
	pcib_add_window_resources(w, &res, 1);
	pcib_activate_window(sc, type);
	w->base = rman_get_start(res);
	w->limit = rman_get_end(res);
	return (0);
}
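
/*
 * Window granularity follows w->step: both edges and the size are rounded
 * to 1 << step bytes, so a memory window (step = 20) is allocated and grown
 * in 1MB units and an I/O window (step = 12) in 4KB units.  A request for
 * 0x4000 bytes of memory, for instance, still results in at least a
 * 1MB-aligned, 1MB-long window.
 */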

/* Try to expand an existing window to the requested base and limit. */
static int
pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    rman_res_t base, rman_res_t limit)
{
	struct resource *res;
	int error, i, force_64k_base;

	KASSERT(base <= w->base && limit >= w->limit,
	    ("attempting to shrink window"));

	/*
	 * XXX: pcib_grow_window() doesn't try to do this anyway and
	 * the error handling for all the edge cases would be tedious.
	 */
	KASSERT(limit == w->limit || base == w->base,
	    ("attempting to grow both ends of a window"));

	/*
	 * Yet more special handling for requests to expand an I/O
	 * window behind an ISA-enabled bridge.  Since I/O windows
	 * have to grow in 0x1000 increments and the end of the 0xffff
	 * range is an alias, growing a window below 64k will always
	 * result in allocating new resources and never adjusting an
	 * existing resource.
	 */
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
	    (limit <= 65535 || (base <= 65535 && base != w->base))) {
		KASSERT(limit == w->limit || limit <= 65535,
		    ("attempting to grow both ends across 64k ISA alias"));

		if (base != w->base)
			error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1);
		else
			error = pcib_alloc_nonisa_ranges(sc, w->limit + 1,
			    limit);
		if (error == 0) {
			w->base = base;
			w->limit = limit;
		}
		return (error);
	}

	/*
	 * Find the existing resource to adjust.  Usually there is only one,
	 * but for an ISA-enabled bridge we might be growing the I/O window
	 * above 64k and need to find the existing resource that maps all
	 * of the area above 64k.
	 */
	for (i = 0; i < w->count; i++) {
		if (rman_get_end(w->res[i]) == w->limit)
			break;
	}
	KASSERT(i != w->count, ("did not find existing resource"));
	res = w->res[i];

	/*
	 * Usually the resource we found should match the window's
	 * existing range.  The one exception is the ISA-enabled case
	 * mentioned above in which case the resource should start at
	 * 64k.
	 */
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
	    w->base <= 65535) {
		KASSERT(rman_get_start(res) == 65536,
		    ("existing resource mismatch"));
		force_64k_base = 1;
	} else {
		KASSERT(w->base == rman_get_start(res),
		    ("existing resource mismatch"));
		force_64k_base = 0;
	}

	error = bus_adjust_resource(sc->dev, type, res, force_64k_base ?
	    rman_get_start(res) : base, limit);
	if (error)
		return (error);

	/* Add the newly allocated region to the resource manager. */
	if (w->base != base) {
		error = rman_manage_region(&w->rman, base, w->base - 1);
		w->base = base;
	} else {
		error = rman_manage_region(&w->rman, w->limit + 1, limit);
		w->limit = limit;
	}
	if (error) {
		if (bootverbose)
			device_printf(sc->dev,
			    "failed to expand %s resource manager\n", w->name);
		(void)bus_adjust_resource(sc->dev, type, res, force_64k_base ?
		    rman_get_start(res) : w->base, w->limit);
	}
	return (error);
}

/*
 * Attempt to grow a window to make room for a given resource request.
 */
static int
pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	rman_res_t align, start_free, end_free, front, back, wmask;
	int error;

	/*
	 * Clamp the desired resource range to the maximum address
	 * this window supports.  Reject impossible requests.
	 *
	 * For I/O port requests behind a bridge with the ISA enable
	 * bit set, force large allocations to start above 64k.
	 */
	if (!w->valid)
		return (EINVAL);
	if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 &&
	    start < 65536)
		start = 65536;
	if (end > w->rman.rm_end)
		end = w->rman.rm_end;
	if (start + count - 1 > end || start + count < start)
		return (EINVAL);
	wmask = ((rman_res_t)1 << w->step) - 1;

	/*
	 * If there is no resource at all, just try to allocate enough
	 * aligned space for this resource.
	 */
	if (w->res == NULL) {
		error = pcib_alloc_new_window(sc, w, type, start, end, count,
		    flags);
		if (error) {
			if (bootverbose)
				device_printf(sc->dev,
		    "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n",
				    w->name, start, end, count);
			return (error);
		}
		if (bootverbose)
			device_printf(sc->dev,
			    "allocated initial %s window of %#jx-%#jx\n",
			    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
		goto updatewin;
	}

	/*
	 * See if growing the window would help.  Compute the minimum
	 * amount of address space needed on both the front and back
	 * ends of the existing window to satisfy the allocation.
	 *
	 * For each end, build a candidate region adjusting for the
	 * required alignment, etc.  If there is a free region at the
	 * edge of the window, grow from the inner edge of the free
	 * region.  Otherwise grow from the window boundary.
	 *
	 * Growing an I/O window below 64k for a bridge with the ISA
	 * enable bit doesn't require any special magic as the step
	 * size of an I/O window (4k) always includes multiple
	 * non-alias ranges when it is grown in either direction.
	 *
	 * XXX: Special case: if w->res is completely empty and the
	 * request size is larger than w->res, we should find the
	 * optimal aligned buffer containing w->res and allocate that.
	 */
	if (bootverbose)
		device_printf(sc->dev,
		    "attempting to grow %s window for (%#jx-%#jx,%#jx)\n",
		    w->name, start, end, count);
	align = (rman_res_t)1 << RF_ALIGNMENT(flags);
	if (start < w->base) {
		if (rman_first_free_region(&w->rman, &start_free, &end_free) !=
		    0 || start_free != w->base)
			end_free = w->base;
		if (end_free > end)
			end_free = end + 1;

		/* Move end_free down until it is properly aligned. */
		end_free &= ~(align - 1);
		end_free--;
		front = end_free - (count - 1);

		/*
		 * The resource would now be allocated at (front,
		 * end_free).  Ensure that fits in the (start, end)
		 * bounds.  end_free is checked above.  If 'front' is
		 * ok, ensure it is properly aligned for this window.
		 * Also check for underflow.
		 */
1972 */ 1973 if (front >= start && front <= end_free) { 1974 if (bootverbose) 1975 printf("\tfront candidate range: %#jx-%#jx\n", 1976 front, end_free); 1977 front &= ~wmask; 1978 front = w->base - front; 1979 } else 1980 front = 0; 1981 } else 1982 front = 0; 1983 if (end > w->limit) { 1984 if (rman_last_free_region(&w->rman, &start_free, &end_free) != 1985 0 || end_free != w->limit) 1986 start_free = w->limit + 1; 1987 if (start_free < start) 1988 start_free = start; 1989 1990 /* Move start_free up until it is properly aligned. */ 1991 start_free = roundup2(start_free, align); 1992 back = start_free + count - 1; 1993 1994 /* 1995 * The resource would now be allocated at (start_free, 1996 * back). Ensure that fits in the (start, end) 1997 * bounds. start_free is checked above. If 'back' is 1998 * ok, ensure it is properly aligned for this window. 1999 * Also check for overflow. 2000 */ 2001 if (back <= end && start_free <= back) { 2002 if (bootverbose) 2003 printf("\tback candidate range: %#jx-%#jx\n", 2004 start_free, back); 2005 back |= wmask; 2006 back -= w->limit; 2007 } else 2008 back = 0; 2009 } else 2010 back = 0; 2011 2012 /* 2013 * Try to allocate the smallest needed region first. 2014 * If that fails, fall back to the other region. 2015 */ 2016 error = ENOSPC; 2017 while (front != 0 || back != 0) { 2018 if (front != 0 && (front <= back || back == 0)) { 2019 error = pcib_expand_window(sc, w, type, w->base - front, 2020 w->limit); 2021 if (error == 0) 2022 break; 2023 front = 0; 2024 } else { 2025 error = pcib_expand_window(sc, w, type, w->base, 2026 w->limit + back); 2027 if (error == 0) 2028 break; 2029 back = 0; 2030 } 2031 } 2032 2033 if (error) 2034 return (error); 2035 if (bootverbose) 2036 device_printf(sc->dev, "grew %s window to %#jx-%#jx\n", 2037 w->name, (uintmax_t)w->base, (uintmax_t)w->limit); 2038 2039 updatewin: 2040 /* Write the new window. */ 2041 KASSERT((w->base & wmask) == 0, ("start address is not aligned")); 2042 KASSERT((w->limit & wmask) == wmask, ("end address is not aligned")); 2043 pcib_write_windows(sc, w->mask); 2044 return (0); 2045 } 2046 2047 /* 2048 * We have to trap resource allocation requests and ensure that the bridge 2049 * is set up to, or capable of handling them. 2050 */ 2051 struct resource * 2052 pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, 2053 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 2054 { 2055 struct pcib_softc *sc; 2056 struct resource *r; 2057 2058 sc = device_get_softc(dev); 2059 2060 /* 2061 * VGA resources are decoded iff the VGA enable bit is set in 2062 * the bridge control register. VGA resources do not fall into 2063 * the resource windows and are passed up to the parent. 
2064 */ 2065 if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) || 2066 (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) { 2067 if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) 2068 return (bus_generic_alloc_resource(dev, child, type, 2069 rid, start, end, count, flags)); 2070 else 2071 return (NULL); 2072 } 2073 2074 switch (type) { 2075 #ifdef PCI_RES_BUS 2076 case PCI_RES_BUS: 2077 return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, 2078 count, flags)); 2079 #endif 2080 case SYS_RES_IOPORT: 2081 if (pcib_is_isa_range(sc, start, end, count)) 2082 return (NULL); 2083 r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start, 2084 end, count, flags); 2085 if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) 2086 break; 2087 if (pcib_grow_window(sc, &sc->io, type, start, end, count, 2088 flags) == 0) 2089 r = pcib_suballoc_resource(sc, &sc->io, child, type, 2090 rid, start, end, count, flags); 2091 break; 2092 case SYS_RES_MEMORY: 2093 /* 2094 * For prefetchable resources, prefer the prefetchable 2095 * memory window, but fall back to the regular memory 2096 * window if that fails. Try both windows before 2097 * attempting to grow a window in case the firmware 2098 * has used a range in the regular memory window to 2099 * map a prefetchable BAR. 2100 */ 2101 if (flags & RF_PREFETCHABLE) { 2102 r = pcib_suballoc_resource(sc, &sc->pmem, child, type, 2103 rid, start, end, count, flags); 2104 if (r != NULL) 2105 break; 2106 } 2107 r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid, 2108 start, end, count, flags); 2109 if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0) 2110 break; 2111 if (flags & RF_PREFETCHABLE) { 2112 if (pcib_grow_window(sc, &sc->pmem, type, start, end, 2113 count, flags) == 0) { 2114 r = pcib_suballoc_resource(sc, &sc->pmem, child, 2115 type, rid, start, end, count, flags); 2116 if (r != NULL) 2117 break; 2118 } 2119 } 2120 if (pcib_grow_window(sc, &sc->mem, type, start, end, count, 2121 flags & ~RF_PREFETCHABLE) == 0) 2122 r = pcib_suballoc_resource(sc, &sc->mem, child, type, 2123 rid, start, end, count, flags); 2124 break; 2125 default: 2126 return (bus_generic_alloc_resource(dev, child, type, rid, 2127 start, end, count, flags)); 2128 } 2129 2130 /* 2131 * If attempts to suballocate from the window fail but this is a 2132 * subtractive bridge, pass the request up the tree. 
2133 */ 2134 if (sc->flags & PCIB_SUBTRACTIVE && r == NULL) 2135 return (bus_generic_alloc_resource(dev, child, type, rid, 2136 start, end, count, flags)); 2137 return (r); 2138 } 2139 2140 int 2141 pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r, 2142 rman_res_t start, rman_res_t end) 2143 { 2144 struct pcib_softc *sc; 2145 2146 sc = device_get_softc(bus); 2147 if (pcib_is_resource_managed(sc, type, r)) 2148 return (rman_adjust_resource(r, start, end)); 2149 return (bus_generic_adjust_resource(bus, child, type, r, start, end)); 2150 } 2151 2152 int 2153 pcib_release_resource(device_t dev, device_t child, int type, int rid, 2154 struct resource *r) 2155 { 2156 struct pcib_softc *sc; 2157 int error; 2158 2159 sc = device_get_softc(dev); 2160 if (pcib_is_resource_managed(sc, type, r)) { 2161 if (rman_get_flags(r) & RF_ACTIVE) { 2162 error = bus_deactivate_resource(child, type, rid, r); 2163 if (error) 2164 return (error); 2165 } 2166 return (rman_release_resource(r)); 2167 } 2168 return (bus_generic_release_resource(dev, child, type, rid, r)); 2169 } 2170 #else 2171 /* 2172 * We have to trap resource allocation requests and ensure that the bridge 2173 * is set up to, or capable of handling them. 2174 */ 2175 struct resource * 2176 pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, 2177 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 2178 { 2179 struct pcib_softc *sc = device_get_softc(dev); 2180 const char *name, *suffix; 2181 int ok; 2182 2183 /* 2184 * Fail the allocation for this range if it's not supported. 2185 */ 2186 name = device_get_nameunit(child); 2187 if (name == NULL) { 2188 name = ""; 2189 suffix = ""; 2190 } else 2191 suffix = " "; 2192 switch (type) { 2193 case SYS_RES_IOPORT: 2194 ok = 0; 2195 if (!pcib_is_io_open(sc)) 2196 break; 2197 ok = (start >= sc->iobase && end <= sc->iolimit); 2198 2199 /* 2200 * Make sure we allow access to VGA I/O addresses when the 2201 * bridge has the "VGA Enable" bit set. 2202 */ 2203 if (!ok && pci_is_vga_ioport_range(start, end)) 2204 ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; 2205 2206 if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { 2207 if (!ok) { 2208 if (start < sc->iobase) 2209 start = sc->iobase; 2210 if (end > sc->iolimit) 2211 end = sc->iolimit; 2212 if (start < end) 2213 ok = 1; 2214 } 2215 } else { 2216 ok = 1; 2217 #if 0 2218 /* 2219 * If we overlap with the subtractive range, then 2220 * pick the upper range to use. 2221 */ 2222 if (start < sc->iolimit && end > sc->iobase) 2223 start = sc->iolimit + 1; 2224 #endif 2225 } 2226 if (end < start) { 2227 device_printf(dev, "ioport: end (%jx) < start (%jx)\n", 2228 end, start); 2229 start = 0; 2230 end = 0; 2231 ok = 0; 2232 } 2233 if (!ok) { 2234 device_printf(dev, "%s%srequested unsupported I/O " 2235 "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n", 2236 name, suffix, start, end, sc->iobase, sc->iolimit); 2237 return (NULL); 2238 } 2239 if (bootverbose) 2240 device_printf(dev, 2241 "%s%srequested I/O range 0x%jx-0x%jx: in range\n", 2242 name, suffix, start, end); 2243 break; 2244 2245 case SYS_RES_MEMORY: 2246 ok = 0; 2247 if (pcib_is_nonprefetch_open(sc)) 2248 ok = ok || (start >= sc->membase && end <= sc->memlimit); 2249 if (pcib_is_prefetch_open(sc)) 2250 ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit); 2251 2252 /* 2253 * Make sure we allow access to VGA memory addresses when the 2254 * bridge has the "VGA Enable" bit set. 
2255 */ 2256 if (!ok && pci_is_vga_memory_range(start, end)) 2257 ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; 2258 2259 if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { 2260 if (!ok) { 2261 ok = 1; 2262 if (flags & RF_PREFETCHABLE) { 2263 if (pcib_is_prefetch_open(sc)) { 2264 if (start < sc->pmembase) 2265 start = sc->pmembase; 2266 if (end > sc->pmemlimit) 2267 end = sc->pmemlimit; 2268 } else { 2269 ok = 0; 2270 } 2271 } else { /* non-prefetchable */ 2272 if (pcib_is_nonprefetch_open(sc)) { 2273 if (start < sc->membase) 2274 start = sc->membase; 2275 if (end > sc->memlimit) 2276 end = sc->memlimit; 2277 } else { 2278 ok = 0; 2279 } 2280 } 2281 } 2282 } else if (!ok) { 2283 ok = 1; /* subtractive bridge: always ok */ 2284 #if 0 2285 if (pcib_is_nonprefetch_open(sc)) { 2286 if (start < sc->memlimit && end > sc->membase) 2287 start = sc->memlimit + 1; 2288 } 2289 if (pcib_is_prefetch_open(sc)) { 2290 if (start < sc->pmemlimit && end > sc->pmembase) 2291 start = sc->pmemlimit + 1; 2292 } 2293 #endif 2294 } 2295 if (end < start) { 2296 device_printf(dev, "memory: end (%jx) < start (%jx)\n", 2297 end, start); 2298 start = 0; 2299 end = 0; 2300 ok = 0; 2301 } 2302 if (!ok && bootverbose) 2303 device_printf(dev, 2304 "%s%srequested unsupported memory range %#jx-%#jx " 2305 "(decoding %#jx-%#jx, %#jx-%#jx)\n", 2306 name, suffix, start, end, 2307 (uintmax_t)sc->membase, (uintmax_t)sc->memlimit, 2308 (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); 2309 if (!ok) 2310 return (NULL); 2311 if (bootverbose) 2312 device_printf(dev,"%s%srequested memory range " 2313 "0x%jx-0x%jx: good\n", 2314 name, suffix, start, end); 2315 break; 2316 2317 default: 2318 break; 2319 } 2320 /* 2321 * Bridge is OK decoding this resource, so pass it up. 2322 */ 2323 return (bus_generic_alloc_resource(dev, child, type, rid, start, end, 2324 count, flags)); 2325 } 2326 #endif 2327 2328 /* 2329 * If ARI is enabled on this downstream port, translate the function number 2330 * to the non-ARI slot/function. The downstream port will convert it back in 2331 * hardware. If ARI is not enabled slot and func are not modified. 2332 */ 2333 static __inline void 2334 pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func) 2335 { 2336 struct pcib_softc *sc; 2337 int ari_func; 2338 2339 sc = device_get_softc(pcib); 2340 ari_func = *func; 2341 2342 if (sc->flags & PCIB_ENABLE_ARI) { 2343 KASSERT(*slot == 0, 2344 ("Non-zero slot number with ARI enabled!")); 2345 *slot = PCIE_ARI_SLOT(ari_func); 2346 *func = PCIE_ARI_FUNC(ari_func); 2347 } 2348 } 2349 2350 2351 static void 2352 pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos) 2353 { 2354 uint32_t ctl2; 2355 2356 ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4); 2357 ctl2 |= PCIEM_CTL2_ARI; 2358 pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4); 2359 2360 sc->flags |= PCIB_ENABLE_ARI; 2361 } 2362 2363 /* 2364 * PCIB interface. 
2365 */ 2366 int 2367 pcib_maxslots(device_t dev) 2368 { 2369 return (PCI_SLOTMAX); 2370 } 2371 2372 static int 2373 pcib_ari_maxslots(device_t dev) 2374 { 2375 struct pcib_softc *sc; 2376 2377 sc = device_get_softc(dev); 2378 2379 if (sc->flags & PCIB_ENABLE_ARI) 2380 return (PCIE_ARI_SLOTMAX); 2381 else 2382 return (PCI_SLOTMAX); 2383 } 2384 2385 static int 2386 pcib_ari_maxfuncs(device_t dev) 2387 { 2388 struct pcib_softc *sc; 2389 2390 sc = device_get_softc(dev); 2391 2392 if (sc->flags & PCIB_ENABLE_ARI) 2393 return (PCIE_ARI_FUNCMAX); 2394 else 2395 return (PCI_FUNCMAX); 2396 } 2397 2398 static void 2399 pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, 2400 int *func) 2401 { 2402 struct pcib_softc *sc; 2403 2404 sc = device_get_softc(pcib); 2405 2406 *bus = PCI_RID2BUS(rid); 2407 if (sc->flags & PCIB_ENABLE_ARI) { 2408 *slot = PCIE_ARI_RID2SLOT(rid); 2409 *func = PCIE_ARI_RID2FUNC(rid); 2410 } else { 2411 *slot = PCI_RID2SLOT(rid); 2412 *func = PCI_RID2FUNC(rid); 2413 } 2414 } 2415 2416 /* 2417 * Since we are a child of a PCI bus, its parent must support the pcib interface. 2418 */ 2419 static uint32_t 2420 pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) 2421 { 2422 #ifdef PCI_HP 2423 struct pcib_softc *sc; 2424 2425 sc = device_get_softc(dev); 2426 if (!pcib_present(sc)) { 2427 switch (width) { 2428 case 2: 2429 return (0xffff); 2430 case 1: 2431 return (0xff); 2432 default: 2433 return (0xffffffff); 2434 } 2435 } 2436 #endif 2437 pcib_xlate_ari(dev, b, &s, &f); 2438 return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, 2439 f, reg, width)); 2440 } 2441 2442 static void 2443 pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) 2444 { 2445 #ifdef PCI_HP 2446 struct pcib_softc *sc; 2447 2448 sc = device_get_softc(dev); 2449 if (!pcib_present(sc)) 2450 return; 2451 #endif 2452 pcib_xlate_ari(dev, b, &s, &f); 2453 PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, 2454 reg, val, width); 2455 } 2456 2457 /* 2458 * Route an interrupt across a PCI bridge. 2459 */ 2460 int 2461 pcib_route_interrupt(device_t pcib, device_t dev, int pin) 2462 { 2463 device_t bus; 2464 int parent_intpin; 2465 int intnum; 2466 2467 /* 2468 * 2469 * The PCI standard defines a swizzle of the child-side device/intpin to 2470 * the parent-side intpin as follows. 2471 * 2472 * device = device on child bus 2473 * child_intpin = intpin on child bus slot (0-3) 2474 * parent_intpin = intpin on parent bus slot (0-3) 2475 * 2476 * parent_intpin = (device + child_intpin) % 4 2477 */ 2478 parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4; 2479 2480 /* 2481 * Our parent is a PCI bus. Its parent must export the pcib interface 2482 * which includes the ability to route interrupts. 2483 */ 2484 bus = device_get_parent(pcib); 2485 intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1); 2486 if (PCI_INTERRUPT_VALID(intnum) && bootverbose) { 2487 device_printf(pcib, "slot %d INT%c is routed to irq %d\n", 2488 pci_get_slot(dev), 'A' + pin - 1, intnum); 2489 } 2490 return(intnum); 2491 } 2492 2493 /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. 
*/ 2494 int 2495 pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) 2496 { 2497 struct pcib_softc *sc = device_get_softc(pcib); 2498 device_t bus; 2499 2500 if (sc->flags & PCIB_DISABLE_MSI) 2501 return (ENXIO); 2502 bus = device_get_parent(pcib); 2503 return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, 2504 irqs)); 2505 } 2506 2507 /* Pass request to release MSI/MSI-X messages up to the parent bridge. */ 2508 int 2509 pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) 2510 { 2511 device_t bus; 2512 2513 bus = device_get_parent(pcib); 2514 return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); 2515 } 2516 2517 /* Pass request to alloc an MSI-X message up to the parent bridge. */ 2518 int 2519 pcib_alloc_msix(device_t pcib, device_t dev, int *irq) 2520 { 2521 struct pcib_softc *sc = device_get_softc(pcib); 2522 device_t bus; 2523 2524 if (sc->flags & PCIB_DISABLE_MSIX) 2525 return (ENXIO); 2526 bus = device_get_parent(pcib); 2527 return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); 2528 } 2529 2530 /* Pass request to release an MSI-X message up to the parent bridge. */ 2531 int 2532 pcib_release_msix(device_t pcib, device_t dev, int irq) 2533 { 2534 device_t bus; 2535 2536 bus = device_get_parent(pcib); 2537 return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); 2538 } 2539 2540 /* Pass request to map MSI/MSI-X message up to parent bridge. */ 2541 int 2542 pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, 2543 uint32_t *data) 2544 { 2545 device_t bus; 2546 int error; 2547 2548 bus = device_get_parent(pcib); 2549 error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); 2550 if (error) 2551 return (error); 2552 2553 pci_ht_map_msi(pcib, *addr); 2554 return (0); 2555 } 2556 2557 /* Pass request for device power state up to parent bridge. */ 2558 int 2559 pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) 2560 { 2561 device_t bus; 2562 2563 bus = device_get_parent(pcib); 2564 return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate)); 2565 } 2566 2567 static int 2568 pcib_ari_enabled(device_t pcib) 2569 { 2570 struct pcib_softc *sc; 2571 2572 sc = device_get_softc(pcib); 2573 2574 return ((sc->flags & PCIB_ENABLE_ARI) != 0); 2575 } 2576 2577 static uint16_t 2578 pcib_ari_get_rid(device_t pcib, device_t dev) 2579 { 2580 struct pcib_softc *sc; 2581 uint8_t bus, slot, func; 2582 2583 sc = device_get_softc(pcib); 2584 2585 if (sc->flags & PCIB_ENABLE_ARI) { 2586 bus = pci_get_bus(dev); 2587 func = pci_get_function(dev); 2588 2589 return (PCI_ARI_RID(bus, func)); 2590 } else { 2591 bus = pci_get_bus(dev); 2592 slot = pci_get_slot(dev); 2593 func = pci_get_function(dev); 2594 2595 return (PCI_RID(bus, slot, func)); 2596 } 2597 } 2598 2599 /* 2600 * Check that the downstream port (pcib) and the endpoint device (dev) both 2601 * support ARI. If so, enable it and return 0, otherwise return an error. 2602 */ 2603 static int 2604 pcib_try_enable_ari(device_t pcib, device_t dev) 2605 { 2606 struct pcib_softc *sc; 2607 int error; 2608 uint32_t cap2; 2609 int ari_cap_off; 2610 uint32_t ari_ver; 2611 uint32_t pcie_pos; 2612 2613 sc = device_get_softc(pcib); 2614 2615 /* 2616 * ARI is controlled in a register in the PCIe capability structure. 2617 * If the downstream port does not have the PCIe capability structure 2618 * then it does not support ARI. 
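 *
 * This is normally reached through the bus method, roughly (illustrative
 * only; error handling omitted):
 *
 *	pcib = device_get_parent(device_get_parent(dev));
 *	if (PCIB_TRY_ENABLE_ARI(pcib, dev) == 0)
 *		functions 8-255 of the device become addressable
 *
 * pci_find_cap() below locates the PCIe capability of the downstream
 * port; without it there is no Device Control 2 register in which to
 * set PCIEM_CTL2_ARI, so ENODEV is returned.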
2619 */ 2620 error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos); 2621 if (error != 0) 2622 return (ENODEV); 2623 2624 /* Check that the PCIe port advertises ARI support. */ 2625 cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4); 2626 if (!(cap2 & PCIEM_CAP2_ARI)) 2627 return (ENODEV); 2628 2629 /* 2630 * Check that the endpoint device advertises ARI support via the ARI 2631 * extended capability structure. 2632 */ 2633 error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off); 2634 if (error != 0) 2635 return (ENODEV); 2636 2637 /* 2638 * Finally, check that the endpoint device supports the same version 2639 * of ARI that we do. 2640 */ 2641 ari_ver = pci_read_config(dev, ari_cap_off, 4); 2642 if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) { 2643 if (bootverbose) 2644 device_printf(pcib, 2645 "Unsupported version of ARI (%d) detected\n", 2646 PCI_EXTCAP_VER(ari_ver)); 2647 2648 return (ENXIO); 2649 } 2650 2651 pcib_enable_ari(sc, pcie_pos); 2652 2653 return (0); 2654 } 2655