1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1994,1995 Stefan Esser, Wolfgang StanglMeier 5 * Copyright (c) 2000 Michael Smith <msmith@freebsd.org> 6 * Copyright (c) 2000 BSDi 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. The name of the author may not be used to endorse or promote products 18 * derived from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 /* 35 * PCI:PCI bridge support. 
36 */ 37 38 #include "opt_pci.h" 39 40 #include <sys/param.h> 41 #include <sys/bus.h> 42 #include <sys/kernel.h> 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/module.h> 46 #include <sys/mutex.h> 47 #include <sys/pciio.h> 48 #include <sys/rman.h> 49 #include <sys/sysctl.h> 50 #include <sys/systm.h> 51 #include <sys/taskqueue.h> 52 53 #include <dev/pci/pcivar.h> 54 #include <dev/pci/pcireg.h> 55 #include <dev/pci/pci_private.h> 56 #include <dev/pci/pcib_private.h> 57 58 #include "pcib_if.h" 59 60 static int pcib_probe(device_t dev); 61 static int pcib_suspend(device_t dev); 62 static int pcib_resume(device_t dev); 63 64 static bus_child_present_t pcib_child_present; 65 static bus_alloc_resource_t pcib_alloc_resource; 66 #ifdef NEW_PCIB 67 static bus_adjust_resource_t pcib_adjust_resource; 68 static bus_release_resource_t pcib_release_resource; 69 #endif 70 static int pcib_reset_child(device_t dev, device_t child, int flags); 71 72 static int pcib_power_for_sleep(device_t pcib, device_t dev, 73 int *pstate); 74 static int pcib_ari_get_id(device_t pcib, device_t dev, 75 enum pci_id_type type, uintptr_t *id); 76 static uint32_t pcib_read_config(device_t dev, u_int b, u_int s, 77 u_int f, u_int reg, int width); 78 static void pcib_write_config(device_t dev, u_int b, u_int s, 79 u_int f, u_int reg, uint32_t val, int width); 80 static int pcib_ari_maxslots(device_t dev); 81 static int pcib_ari_maxfuncs(device_t dev); 82 static int pcib_try_enable_ari(device_t pcib, device_t dev); 83 static int pcib_ari_enabled(device_t pcib); 84 static void pcib_ari_decode_rid(device_t pcib, uint16_t rid, 85 int *bus, int *slot, int *func); 86 #ifdef PCI_HP 87 static void pcib_pcie_ab_timeout(void *arg, int pending); 88 static void pcib_pcie_cc_timeout(void *arg, int pending); 89 static void pcib_pcie_dll_timeout(void *arg, int pending); 90 #endif 91 static int pcib_request_feature_default(device_t pcib, device_t dev, 92 enum pci_feature feature); 93 94 static 
device_method_t pcib_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,		pcib_probe),
    DEVMETHOD(device_attach,		pcib_attach),
    DEVMETHOD(device_detach,		pcib_detach),
    DEVMETHOD(device_shutdown,		bus_generic_shutdown),
    DEVMETHOD(device_suspend,		pcib_suspend),
    DEVMETHOD(device_resume,		pcib_resume),

    /* Bus interface */
    DEVMETHOD(bus_child_present,	pcib_child_present),
    DEVMETHOD(bus_read_ivar,		pcib_read_ivar),
    DEVMETHOD(bus_write_ivar,		pcib_write_ivar),
    DEVMETHOD(bus_alloc_resource,	pcib_alloc_resource),
#ifdef NEW_PCIB
    /* With NEW_PCIB the bridge sub-allocates from its own windows. */
    DEVMETHOD(bus_adjust_resource,	pcib_adjust_resource),
    DEVMETHOD(bus_release_resource,	pcib_release_resource),
#else
    /* Otherwise resource requests are simply passed up to the parent. */
    DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
    DEVMETHOD(bus_release_resource,	bus_generic_release_resource),
#endif
    DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
    DEVMETHOD(bus_reset_child,		pcib_reset_child),

    /* pcib interface */
    DEVMETHOD(pcib_maxslots,		pcib_ari_maxslots),
    DEVMETHOD(pcib_maxfuncs,		pcib_ari_maxfuncs),
    DEVMETHOD(pcib_read_config,		pcib_read_config),
    DEVMETHOD(pcib_write_config,	pcib_write_config),
    DEVMETHOD(pcib_route_interrupt,	pcib_route_interrupt),
    DEVMETHOD(pcib_alloc_msi,		pcib_alloc_msi),
    DEVMETHOD(pcib_release_msi,		pcib_release_msi),
    DEVMETHOD(pcib_alloc_msix,		pcib_alloc_msix),
    DEVMETHOD(pcib_release_msix,	pcib_release_msix),
    DEVMETHOD(pcib_map_msi,		pcib_map_msi),
    DEVMETHOD(pcib_power_for_sleep,	pcib_power_for_sleep),
    DEVMETHOD(pcib_get_id,		pcib_ari_get_id),
    DEVMETHOD(pcib_try_enable_ari,	pcib_try_enable_ari),
    DEVMETHOD(pcib_ari_enabled,		pcib_ari_enabled),
    DEVMETHOD(pcib_decode_rid,		pcib_ari_decode_rid),
    DEVMETHOD(pcib_request_feature,	pcib_request_feature_default),

    DEVMETHOD_END
};

/* Register the pcib driver class and attach it as a child of "pci". */
DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc));
EARLY_DRIVER_MODULE(pcib, pci, pcib_driver, NULL, NULL, BUS_PASS_BUS);

#if defined(NEW_PCIB) || defined(PCI_HP)
SYSCTL_DECL(_hw_pci);
#endif

#ifdef NEW_PCIB
/* Tunable: when set, discard firmware-programmed bridge windows at attach. */
static int pci_clear_pcib;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0,
    "Clear firmware-assigned resources for PCI-PCI bridge I/O windows.");

/*
 * Get the corresponding window if this resource from a child device was
 * sub-allocated from one of our window resource managers.
 *
 * Returns NULL if 'r' does not belong to any of this bridge's windows.
 */
static struct pcib_window *
pcib_get_resource_window(struct pcib_softc *sc, int type, struct resource *r)
{
    switch (type) {
    case SYS_RES_IOPORT:
        if (rman_is_region_manager(r, &sc->io.rman))
            return (&sc->io);
        break;
    case SYS_RES_MEMORY:
        /* Prefetchable resources may live in either memory rman. */
        if (rman_get_flags(r) & RF_PREFETCHABLE &&
            rman_is_region_manager(r, &sc->pmem.rman))
            return (&sc->pmem);
        if (rman_is_region_manager(r, &sc->mem.rman))
            return (&sc->mem);
        break;
    }
    return (NULL);
}

/*
 * Is a resource from a child device sub-allocated from one of our
 * resource managers?
 */
static int
pcib_is_resource_managed(struct pcib_softc *sc, int type, struct resource *r)
{

#ifdef PCI_RES_BUS
    /* Secondary-bus-number resources are tracked in their own rman. */
    if (type == PCI_RES_BUS)
        return (rman_is_region_manager(r, &sc->bus.rman));
#endif
    return (pcib_get_resource_window(sc, type, r) != NULL);
}

/* A window is "open" only when it decodes a non-empty range. */
static int
pcib_is_window_open(struct pcib_window *pw)
{

    return (pw->valid && pw->base < pw->limit);
}

/*
 * XXX: If RF_ACTIVE did not also imply allocating a bus space tag and
 * handle for the resource, we could pass RF_ACTIVE up to the PCI bus
 * when allocating the resource windows and rely on the PCI bus driver
 * to do this for us.
 */
static void
pcib_activate_window(struct pcib_softc *sc, int type)
{

    PCI_ENABLE_IO(device_get_parent(sc->dev), sc->dev, type);
}

/*
 * Write the window state cached in the softc back to the bridge's
 * config-space base/limit registers.  'mask' selects which of the I/O,
 * memory, and prefetchable memory windows to update.
 */
static void
pcib_write_windows(struct pcib_softc *sc, int mask)
{
    device_t dev;
    uint32_t val;

    dev = sc->dev;
    if (sc->io.valid && mask & WIN_IO) {
        val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
        /* The upper 16 bits exist only for 32-bit I/O windows. */
        if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
            pci_write_config(dev, PCIR_IOBASEH_1,
                sc->io.base >> 16, 2);
            pci_write_config(dev, PCIR_IOLIMITH_1,
                sc->io.limit >> 16, 2);
        }
        pci_write_config(dev, PCIR_IOBASEL_1, sc->io.base >> 8, 1);
        pci_write_config(dev, PCIR_IOLIMITL_1, sc->io.limit >> 8, 1);
    }

    if (mask & WIN_MEM) {
        pci_write_config(dev, PCIR_MEMBASE_1, sc->mem.base >> 16, 2);
        pci_write_config(dev, PCIR_MEMLIMIT_1, sc->mem.limit >> 16, 2);
    }

    if (sc->pmem.valid && mask & WIN_PMEM) {
        val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
        /* The upper 32 bits exist only for 64-bit prefetch windows. */
        if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
            pci_write_config(dev, PCIR_PMBASEH_1,
                sc->pmem.base >> 32, 4);
            pci_write_config(dev, PCIR_PMLIMITH_1,
                sc->pmem.limit >> 32, 4);
        }
        pci_write_config(dev, PCIR_PMBASEL_1, sc->pmem.base >> 16, 2);
        pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmem.limit >> 16, 2);
    }
}

/*
 * This is used to reject I/O port allocations that conflict with an
 * ISA alias range.
 *
 * Returns 1 (and logs under bootverbose) if the fixed range
 * [start, end] overlaps an ISA alias, 0 otherwise.
 */
static int
pcib_is_isa_range(struct pcib_softc *sc, rman_res_t start, rman_res_t end,
    rman_res_t count)
{
    rman_res_t next_alias;

    if (!(sc->bridgectl & PCIB_BCR_ISA_ENABLE))
        return (0);

    /* Only check fixed ranges for overlap. */
    if (start + count - 1 != end)
        return (0);

    /* ISA aliases are only in the lower 64KB of I/O space. */
    if (start >= 65536)
        return (0);

    /* Check for overlap with 0x000 - 0x0ff as a special case. */
    if (start < 0x100)
        goto alias;

    /*
     * If the start address is an alias, the range is an alias.
     * Otherwise, compute the start of the next alias range and
     * check if it is before the end of the candidate range.
     */
    if ((start & 0x300) != 0)
        goto alias;
    next_alias = (start & ~0x3fful) | 0x100;
    if (next_alias <= end)
        goto alias;
    return (0);

alias:
    if (bootverbose)
        device_printf(sc->dev,
            "I/O range %#jx-%#jx overlaps with an ISA alias\n", start,
            end);
    return (1);
}

/*
 * Append 'count' resources to the window's resource array and hand
 * their address ranges to the window's rman so children can
 * sub-allocate from them.  Panics if the rman rejects a region.
 */
static void
pcib_add_window_resources(struct pcib_window *w, struct resource **res,
    int count)
{
    struct resource **newarray;
    int error, i;

    /* Grow the array by copy; the old array (if any) is freed. */
    newarray = malloc(sizeof(struct resource *) * (w->count + count),
        M_DEVBUF, M_WAITOK);
    if (w->res != NULL)
        bcopy(w->res, newarray, sizeof(struct resource *) * w->count);
    bcopy(res, newarray + w->count, sizeof(struct resource *) * count);
    free(w->res, M_DEVBUF);
    w->res = newarray;
    w->count += count;

    for (i = 0; i < count; i++) {
        error = rman_manage_region(&w->rman, rman_get_start(res[i]),
            rman_get_end(res[i]));
        if (error)
            panic("Failed to add resource to rman");
    }
}

/* Callback type used by pcib_walk_nonisa_ranges() below. */
typedef void (nonisa_callback)(rman_res_t start, rman_res_t end, void *arg);

/*
 * Invoke 'cb' once for each sub-range of [start, end] that does not
 * overlap an ISA alias range (every 0x100-0x3ff within each 1KB chunk
 * of the low 64KB, plus the 0x000-0x0ff system range).
 */
static void
pcib_walk_nonisa_ranges(rman_res_t start, rman_res_t end, nonisa_callback *cb,
    void *arg)
{
    rman_res_t next_end;

    /*
     * If start is within an ISA alias range, move up to the start
     * of the next non-alias range.  As a special case, addresses
     * in the range 0x000 - 0x0ff should also be skipped since
     * those are used for various system I/O devices in ISA
     * systems.
     */
    if (start <= 65535) {
        if (start < 0x100 || (start & 0x300) != 0) {
            start &= ~0x3ff;
            start += 0x400;
        }
    }

    /* ISA aliases are only in the lower 64KB of I/O space. */
    while (start <= MIN(end, 65535)) {
        next_end = MIN(start | 0xff, end);
        cb(start, next_end, arg);
        start += 0x400;
    }

    /* Everything above 64KB (if any) is one contiguous range. */
    if (start <= end)
        cb(start, end, arg);
}

/* Walk callback: count the number of non-ISA ranges. */
static void
count_ranges(rman_res_t start, rman_res_t end, void *arg)
{
    int *countp;

    countp = arg;
    (*countp)++;
}

/* Accumulator shared by the alloc_ranges() walk callback. */
struct alloc_state {
    struct resource **res;	/* allocated resources, filled in order */
    struct pcib_softc *sc;
    int count, error;		/* count of successes; first error seen */
};

/* Walk callback: allocate one non-ISA I/O range from the parent bus. */
static void
alloc_ranges(rman_res_t start, rman_res_t end, void *arg)
{
    struct alloc_state *as;
    struct pcib_window *w;
    int rid;

    as = arg;
    /* Once one allocation fails, skip the rest of the walk. */
    if (as->error != 0)
        return;

    w = &as->sc->io;
    rid = w->reg;
    if (bootverbose)
        device_printf(as->sc->dev,
            "allocating non-ISA range %#jx-%#jx\n", start, end);
    as->res[as->count] = bus_alloc_resource(as->sc->dev, SYS_RES_IOPORT,
        &rid, start, end, end - start + 1, 0);
    if (as->res[as->count] == NULL)
        as->error = ENXIO;
    else
        as->count++;
}

/*
 * Allocate all non-ISA sub-ranges of [start, end] from the parent and
 * add them to the I/O window.  On failure every range allocated so far
 * is released and the error is returned; on success returns 0.
 */
static int
pcib_alloc_nonisa_ranges(struct pcib_softc *sc, rman_res_t start, rman_res_t end)
{
    struct alloc_state as;
    int i, new_count;

    /* First, see how many ranges we need. */
    new_count = 0;
    pcib_walk_nonisa_ranges(start, end, count_ranges, &new_count);

    /* Second, allocate the ranges. */
    as.res = malloc(sizeof(struct resource *) * new_count, M_DEVBUF,
        M_WAITOK);
    as.sc = sc;
    as.count = 0;
    as.error = 0;
    pcib_walk_nonisa_ranges(start, end, alloc_ranges, &as);
    if (as.error != 0) {
        for (i = 0; i < as.count; i++)
            bus_release_resource(sc->dev, SYS_RES_IOPORT,
                sc->io.reg, as.res[i]);
        free(as.res, M_DEVBUF);
        return (as.error);
    }
    KASSERT(as.count == new_count, ("%s: count mismatch", __func__));

    /* Third, add the ranges to the window. */
    pcib_add_window_resources(&sc->io, as.res, as.count);
    free(as.res, M_DEVBUF);
    return (0);
}

/*
 * Initialize a window's rman and, if the firmware left the window
 * open, allocate its current range from the parent bus and hand it to
 * the rman.  'max_address' bounds the rman (and sanity-checks the
 * firmware-programmed base/limit).
 */
static void
pcib_alloc_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    int flags, pci_addr_t max_address)
{
    struct resource *res;
    char buf[64];
    int error, rid;

    /* Clamp if pci_addr_t doesn't fit in rman_res_t. */
    if (max_address != (rman_res_t)max_address)
        max_address = ~0;
    w->rman.rm_start = 0;
    w->rman.rm_end = max_address;
    w->rman.rm_type = RMAN_ARRAY;
    snprintf(buf, sizeof(buf), "%s %s window",
        device_get_nameunit(sc->dev), w->name);
    w->rman.rm_descr = strdup(buf, M_DEVBUF);
    error = rman_init(&w->rman);
    if (error)
        panic("Failed to initialize %s %s rman",
            device_get_nameunit(sc->dev), w->name);

    if (!pcib_is_window_open(w))
        return;

    if (w->base > max_address || w->limit > max_address) {
        device_printf(sc->dev,
            "initial %s window has too many bits, ignoring\n", w->name);
        return;
    }
    /* With ISA decoding enabled, carve out the ISA alias holes. */
    if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE)
        (void)pcib_alloc_nonisa_ranges(sc, w->base, w->limit);
    else {
        rid = w->reg;
        res = bus_alloc_resource(sc->dev, type, &rid, w->base, w->limit,
            w->limit - w->base + 1, flags);
        if (res != NULL)
            pcib_add_window_resources(w, &res, 1);
    }
    if (w->res == NULL) {
        device_printf(sc->dev,
            "failed to allocate initial %s window: %#jx-%#jx\n",
            w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
        /* Could not claim the range: close the window (base > limit)
         * and push the closed state out to the hardware. */
        w->base = max_address;
        w->limit = 0;
        pcib_write_windows(sc, w->mask);
        return;
    }
    pcib_activate_window(sc, type);
}

/*
 * Initialize I/O windows.
 */
static void
pcib_probe_windows(struct pcib_softc *sc)
{
    pci_addr_t max;
    device_t dev;
    uint32_t val;

    dev = sc->dev;

    if (pci_clear_pcib) {
        pcib_bridge_init(dev);
    }

    /* Determine if the I/O port window is implemented. */
    val = pci_read_config(dev, PCIR_IOBASEL_1, 1);
    if (val == 0) {
        /*
         * If 'val' is zero, then only 16-bits of I/O space
         * are supported.
         */
        /* Probe writability: a read-only zero means no I/O window. */
        pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1);
        if (pci_read_config(dev, PCIR_IOBASEL_1, 1) != 0) {
            sc->io.valid = 1;
            pci_write_config(dev, PCIR_IOBASEL_1, 0, 1);
        }
    } else
        sc->io.valid = 1;

    /* Read the existing I/O port window. */
    if (sc->io.valid) {
        sc->io.reg = PCIR_IOBASEL_1;
        sc->io.step = 12;
        sc->io.mask = WIN_IO;
        sc->io.name = "I/O port";
        if ((val & PCIM_BRIO_MASK) == PCIM_BRIO_32) {
            sc->io.base = PCI_PPBIOBASE(
                pci_read_config(dev, PCIR_IOBASEH_1, 2), val);
            sc->io.limit = PCI_PPBIOLIMIT(
                pci_read_config(dev, PCIR_IOLIMITH_1, 2),
                pci_read_config(dev, PCIR_IOLIMITL_1, 1));
            max = 0xffffffff;
        } else {
            sc->io.base = PCI_PPBIOBASE(0, val);
            sc->io.limit = PCI_PPBIOLIMIT(0,
                pci_read_config(dev, PCIR_IOLIMITL_1, 1));
            max = 0xffff;
        }
        pcib_alloc_window(sc, &sc->io, SYS_RES_IOPORT, 0, max);
    }

    /* Read the existing memory window. */
    sc->mem.valid = 1;
    sc->mem.reg = PCIR_MEMBASE_1;
    sc->mem.step = 20;
    sc->mem.mask = WIN_MEM;
    sc->mem.name = "memory";
    sc->mem.base = PCI_PPBMEMBASE(0,
        pci_read_config(dev, PCIR_MEMBASE_1, 2));
    sc->mem.limit = PCI_PPBMEMLIMIT(0,
        pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
    pcib_alloc_window(sc, &sc->mem, SYS_RES_MEMORY, 0, 0xffffffff);

    /* Determine if the prefetchable memory window is implemented. */
    val = pci_read_config(dev, PCIR_PMBASEL_1, 2);
    if (val == 0) {
        /*
         * If 'val' is zero, then only 32-bits of memory space
         * are supported.
         */
        /* Probe writability, as for the I/O window above. */
        pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2);
        if (pci_read_config(dev, PCIR_PMBASEL_1, 2) != 0) {
            sc->pmem.valid = 1;
            pci_write_config(dev, PCIR_PMBASEL_1, 0, 2);
        }
    } else
        sc->pmem.valid = 1;

    /* Read the existing prefetchable memory window. */
    if (sc->pmem.valid) {
        sc->pmem.reg = PCIR_PMBASEL_1;
        sc->pmem.step = 20;
        sc->pmem.mask = WIN_PMEM;
        sc->pmem.name = "prefetch";
        if ((val & PCIM_BRPM_MASK) == PCIM_BRPM_64) {
            sc->pmem.base = PCI_PPBMEMBASE(
                pci_read_config(dev, PCIR_PMBASEH_1, 4), val);
            sc->pmem.limit = PCI_PPBMEMLIMIT(
                pci_read_config(dev, PCIR_PMLIMITH_1, 4),
                pci_read_config(dev, PCIR_PMLIMITL_1, 2));
            max = 0xffffffffffffffff;
        } else {
            sc->pmem.base = PCI_PPBMEMBASE(0, val);
            sc->pmem.limit = PCI_PPBMEMLIMIT(0,
                pci_read_config(dev, PCIR_PMLIMITL_1, 2));
            max = 0xffffffff;
        }
        pcib_alloc_window(sc, &sc->pmem, SYS_RES_MEMORY,
            RF_PREFETCHABLE, max);
    }
}

/*
 * Tear down one window: destroy its rman and release each resource
 * that was allocated from the parent bus.  A no-op for windows that
 * were never valid.
 */
static void
pcib_release_window(struct pcib_softc *sc, struct pcib_window *w, int type)
{
    device_t dev;
    int error, i;

    if (!w->valid)
        return;

    dev = sc->dev;
    error = rman_fini(&w->rman);
    if (error) {
        device_printf(dev, "failed to release %s rman\n", w->name);
        return;
    }
    /* rm_descr was strdup'd in pcib_alloc_window(). */
    free(__DECONST(char *, w->rman.rm_descr), M_DEVBUF);
    for (i = 0; i < w->count; i++) {
        error = bus_free_resource(dev, type, w->res[i]);
        if (error)
            device_printf(dev,
                "failed to release %s resource: %d\n", w->name,
                error);
    }
    free(w->res, M_DEVBUF);
}

/* Release all three bridge windows (prefetch, memory, I/O). */
static void
pcib_free_windows(struct pcib_softc *sc)
{

    pcib_release_window(sc, &sc->pmem, SYS_RES_MEMORY);
    pcib_release_window(sc, &sc->mem, SYS_RES_MEMORY);
    pcib_release_window(sc, &sc->io, SYS_RES_IOPORT);
}

#ifdef PCI_RES_BUS
/*
 * Allocate a suitable secondary bus for this bridge if needed and
 * initialize the resource manager for the secondary bus range.  Note
 * that the minimum count is a desired value and this may allocate a
 * smaller range.
 */
void
pcib_setup_secbus(device_t dev, struct pcib_secbus *bus, int min_count)
{
    char buf[64];
    int error, rid, sec_reg;

    /* Secondary/subordinate registers live at different offsets for
     * PCI-PCI vs. CardBus bridge headers. */
    switch (pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) {
    case PCIM_HDRTYPE_BRIDGE:
        sec_reg = PCIR_SECBUS_1;
        bus->sub_reg = PCIR_SUBBUS_1;
        break;
    case PCIM_HDRTYPE_CARDBUS:
        sec_reg = PCIR_SECBUS_2;
        bus->sub_reg = PCIR_SUBBUS_2;
        break;
    default:
        panic("not a PCI bridge");
    }
    bus->sec = pci_read_config(dev, sec_reg, 1);
    bus->sub = pci_read_config(dev, bus->sub_reg, 1);
    bus->dev = dev;
    bus->rman.rm_start = 0;
    bus->rman.rm_end = PCI_BUSMAX;
    bus->rman.rm_type = RMAN_ARRAY;
    snprintf(buf, sizeof(buf), "%s bus numbers", device_get_nameunit(dev));
    bus->rman.rm_descr = strdup(buf, M_DEVBUF);
    error = rman_init(&bus->rman);
    if (error)
        panic("Failed to initialize %s bus number rman",
            device_get_nameunit(dev));

    /*
     * Allocate a bus range.  This will return an existing bus range
     * if one exists, or a new bus range if one does not.
     */
    rid = 0;
    bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
        min_count, RF_ACTIVE);
    if (bus->res == NULL) {
        /*
         * Fall back to just allocating a range of a single bus
         * number.
         */
        bus->res = bus_alloc_resource_anywhere(dev, PCI_RES_BUS, &rid,
            1, RF_ACTIVE);
    } else if (rman_get_size(bus->res) < min_count)
        /*
         * Attempt to grow the existing range to satisfy the
         * minimum desired count.
         */
        (void)bus_adjust_resource(dev, PCI_RES_BUS, bus->res,
            rman_get_start(bus->res), rman_get_start(bus->res) +
            min_count - 1);

    /*
     * Add the initial resource to the rman.
     */
    if (bus->res != NULL) {
        error = rman_manage_region(&bus->rman, rman_get_start(bus->res),
            rman_get_end(bus->res));
        if (error)
            panic("Failed to add resource to rman");
        bus->sec = rman_get_start(bus->res);
        bus->sub = rman_get_end(bus->res);
    }
}

/* Tear down the secondary bus rman and release the bus-number range. */
void
pcib_free_secbus(device_t dev, struct pcib_secbus *bus)
{
    int error;

    error = rman_fini(&bus->rman);
    if (error) {
        device_printf(dev, "failed to release bus number rman\n");
        return;
    }
    /* rm_descr was strdup'd in pcib_setup_secbus(). */
    free(__DECONST(char *, bus->rman.rm_descr), M_DEVBUF);

    error = bus_free_resource(dev, PCI_RES_BUS, bus->res);
    if (error)
        device_printf(dev,
            "failed to release bus numbers resource: %d\n", error);
}

/*
 * Sub-allocate a range of bus numbers for 'child' from this bridge's
 * bus rman.  Returns NULL if the request cannot be satisfied from the
 * current range.
 */
static struct resource *
pcib_suballoc_bus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
    struct resource *res;

    res = rman_reserve_resource(&bus->rman, start, end, count, flags,
        child);
    if (res == NULL)
        return (NULL);

    if (bootverbose)
        device_printf(bus->dev,
            "allocated bus range (%ju-%ju) for rid %d of %s\n",
            rman_get_start(res), rman_get_end(res), *rid,
            pcib_child_name(child));
    rman_set_rid(res, *rid);
    return (res);
}

/*
 * Attempt to
grow the secondary bus range.  This is much simpler than
 * for I/O windows as the range can only be grown by increasing
 * subbus.
 */
static int
pcib_grow_subbus(struct pcib_secbus *bus, rman_res_t new_end)
{
    rman_res_t old_end;
    int error;

    old_end = rman_get_end(bus->res);
    KASSERT(new_end > old_end, ("attempt to shrink subbus"));
    error = bus_adjust_resource(bus->dev, PCI_RES_BUS, bus->res,
        rman_get_start(bus->res), new_end);
    if (error)
        return (error);
    if (bootverbose)
        device_printf(bus->dev, "grew bus range to %ju-%ju\n",
            rman_get_start(bus->res), rman_get_end(bus->res));
    /* Make the newly acquired bus numbers available for suballocation. */
    error = rman_manage_region(&bus->rman, old_end + 1,
        rman_get_end(bus->res));
    if (error)
        panic("Failed to add resource to rman");
    bus->sub = rman_get_end(bus->res);
    /* Publish the new subordinate bus number to the hardware. */
    pci_write_config(bus->dev, bus->sub_reg, bus->sub, 1);
    return (0);
}

/*
 * Allocate a bus-number range for a child, growing this bridge's
 * subordinate bus range if the current one cannot satisfy the request.
 */
struct resource *
pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
    struct resource *res;
    rman_res_t start_free, end_free, new_end;

    /*
     * First, see if the request can be satisified by the existing
     * bus range.
     */
    res = pcib_suballoc_bus(bus, child, rid, start, end, count, flags);
    if (res != NULL)
        return (res);

    /*
     * Figure out a range to grow the bus range.  First, find the
     * first bus number after the last allocated bus in the rman and
     * enforce that as a minimum starting point for the range.
     */
    if (rman_last_free_region(&bus->rman, &start_free, &end_free) != 0 ||
        end_free != bus->sub)
        start_free = bus->sub + 1;
    if (start_free < start)
        start_free = start;
    new_end = start_free + count - 1;

    /*
     * See if this new range would satisfy the request if it
     * succeeds.
     */
    if (new_end > end)
        return (NULL);

    /* Finally, attempt to grow the existing resource. */
    if (bootverbose) {
        device_printf(bus->dev,
            "attempting to grow bus range for %ju buses\n", count);
        printf("\tback candidate range: %ju-%ju\n", start_free,
            new_end);
    }
    if (pcib_grow_subbus(bus, new_end) == 0)
        return (pcib_suballoc_bus(bus, child, rid, start, end, count,
            flags));
    return (NULL);
}
#endif

#else

/*
 * Is the prefetch window open (eg, can we allocate memory in it?)
 */
static int
pcib_is_prefetch_open(struct pcib_softc *sc)
{
    return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
}

/*
 * Is the nonprefetch window open (eg, can we allocate memory in it?)
 */
static int
pcib_is_nonprefetch_open(struct pcib_softc *sc)
{
    return (sc->membase > 0 && sc->membase < sc->memlimit);
}

/*
 * Is the io window open (eg, can we allocate ports in it?)
 */
static int
pcib_is_io_open(struct pcib_softc *sc)
{
    return (sc->iobase > 0 && sc->iobase < sc->iolimit);
}

/*
 * Get current I/O decode.
 */
static void
pcib_get_io_decode(struct pcib_softc *sc)
{
    device_t dev;
    uint32_t iolow;

    dev = sc->dev;

    iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
    /* 32-bit I/O windows carry the upper 16 bits in a second register. */
    if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
        sc->iobase = PCI_PPBIOBASE(
            pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
    else
        sc->iobase = PCI_PPBIOBASE(0, iolow);

    iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
    if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
        sc->iolimit = PCI_PPBIOLIMIT(
            pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
    else
        sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
}

/*
 * Get current memory decode.
 */
static void
pcib_get_mem_decode(struct pcib_softc *sc)
{
    device_t dev;
    pci_addr_t pmemlow;

    dev = sc->dev;

    sc->membase = PCI_PPBMEMBASE(0,
        pci_read_config(dev, PCIR_MEMBASE_1, 2));
    sc->memlimit = PCI_PPBMEMLIMIT(0,
        pci_read_config(dev, PCIR_MEMLIMIT_1, 2));

    pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2);
    /* 64-bit prefetch windows carry the upper 32 bits separately. */
    if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
        sc->pmembase = PCI_PPBMEMBASE(
            pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow);
    else
        sc->pmembase = PCI_PPBMEMBASE(0, pmemlow);

    pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2);
    if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
        sc->pmemlimit = PCI_PPBMEMLIMIT(
            pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow);
    else
        sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow);
}

/*
 * Restore previous I/O decode.
 */
static void
pcib_set_io_decode(struct pcib_softc *sc)
{
    device_t dev;
    uint32_t iohi;

    dev = sc->dev;

    iohi = sc->iobase >> 16;
    if (iohi > 0)
        pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2);
    pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1);

    iohi = sc->iolimit >> 16;
    if (iohi > 0)
        pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2);
    pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1);
}

/*
 * Restore previous memory decode.
 */
static void
pcib_set_mem_decode(struct pcib_softc *sc)
{
    device_t dev;
    pci_addr_t pmemhi;

    dev = sc->dev;

    pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2);
    pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2);

    pmemhi = sc->pmembase >> 32;
    if (pmemhi > 0)
        pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4);
    pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2);

    pmemhi = sc->pmemlimit >> 32;
    if (pmemhi > 0)
        pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4);
    pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2);
}
#endif

#ifdef PCI_HP
/*
 * PCI-express HotPlug support.
 */
/* Tunable: allows native PCIe hotplug to be disabled entirely. */
static int pci_enable_pcie_hp = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN,
    &pci_enable_pcie_hp, 0,
    "Enable support for native PCI-express HotPlug.");

/* Dedicated thread (taskqueue_pci_hp) used for the hotplug tasks below. */
TASKQUEUE_DEFINE_THREAD(pci_hp);

/*
 * Decide whether to enable native HotPlug for this bridge.  Sets
 * PCIB_HOTPLUG in sc->flags only if the port is a PCIe slot with the
 * HotPlug Capable and Data Link Layer Active Reporting capabilities,
 * the MRL sanity check passes, and the firmware grants the feature.
 */
static void
pcib_probe_hotplug(struct pcib_softc *sc)
{
    device_t dev;
    uint32_t link_cap;
    uint16_t link_sta, slot_sta;

    if (!pci_enable_pcie_hp)
        return;

    dev = sc->dev;
    if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0)
        return;

    if (!(pcie_read_config(dev, PCIER_FLAGS, 2) & PCIEM_FLAGS_SLOT))
        return;

    sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4);

    if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC) == 0)
        return;
    link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
    if ((link_cap & PCIEM_LINK_CAP_DL_ACTIVE) == 0)
        return;

    /*
     * Some devices report that they have an MRL when they actually
     * do not.  Since they always report that the MRL is open, child
     * devices would be ignored.  Try to detect these devices and
     * ignore their claim of HotPlug support.
     *
     * If there is an open MRL but the Data Link Layer is active,
     * the MRL is not real.
     */
    if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) != 0) {
        link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
        slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
        if ((slot_sta & PCIEM_SLOT_STA_MRLSS) != 0 &&
            (link_sta & PCIEM_LINK_STA_DL_ACTIVE) != 0) {
            return;
        }
    }

    /*
     * Now that we're sure we want to do hot plug, ask the
     * firmware, if any, if that's OK.
     */
    if (pcib_request_feature(dev, PCI_FEATURE_HP) != 0) {
        if (bootverbose)
            device_printf(dev, "Unable to activate hot plug feature.\n");
        return;
    }

    sc->flags |= PCIB_HOTPLUG;
}

/*
 * Send a HotPlug command to the slot control register.  If this slot
 * uses command completion interrupts and a previous command is still
 * in progress, then the command is dropped.  Once the previous
 * command completes or times out, pcib_pcie_hotplug_update() will be
 * invoked to post a new command based on the slot's state at that
 * time.
 */
static void
pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask)
{
    device_t dev;
    uint16_t ctl, new;

    dev = sc->dev;

    if (sc->flags & PCIB_HOTPLUG_CMD_PENDING)
        return;

    /* Merge 'val' into the masked bits of the current control value;
     * skip the write entirely if nothing would change. */
    ctl = pcie_read_config(dev, PCIER_SLOT_CTL, 2);
    new = (ctl & ~mask) | val;
    if (new == ctl)
        return;
    if (bootverbose)
        device_printf(dev, "HotPlug command: %04x -> %04x\n", ctl, new);
    pcie_write_config(dev, PCIER_SLOT_CTL, new, 2);
    if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS) &&
        (ctl & new) & PCIEM_SLOT_CTL_CCIE) {
        /* Expect a Command Completed interrupt; arm a 1s timeout. */
        sc->flags |= PCIB_HOTPLUG_CMD_PENDING;
        if (!cold)
            taskqueue_enqueue_timeout(taskqueue_pci_hp,
                &sc->pcie_cc_task, hz);
    }
}

/*
 * Handle a Command Completed event: cancel the completion timeout,
 * clear the pending flag, and wake any thread sleeping on 'sc'.
 */
static void
pcib_pcie_hotplug_command_completed(struct pcib_softc *sc)
{
    device_t dev;

    dev = sc->dev;

    if (bootverbose)
        device_printf(dev, "Command Completed\n");
    if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING))
        return;
    taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL);
    sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
    wakeup(sc);
}

/*
 * Returns true if a card is fully inserted from the user's
 * perspective.  It may not yet be ready for access, but the driver
 * can now start enabling access if necessary.
 */
static bool
pcib_hotplug_inserted(struct pcib_softc *sc)
{

    /* Pretend the card isn't present if a detach is forced. */
    if (sc->flags & PCIB_DETACHING)
        return (false);

    /* Card must be present in the slot. */
    if ((sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS) == 0)
        return (false);

    /* A power fault implicitly turns off power to the slot. */
    if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
        return (false);

    /* If the MRL is disengaged, the slot is powered off.
*/ 1076 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP && 1077 (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS) != 0) 1078 return (false); 1079 1080 return (true); 1081 } 1082 1083 /* 1084 * Returns -1 if the card is fully inserted, powered, and ready for 1085 * access. Otherwise, returns 0. 1086 */ 1087 static int 1088 pcib_hotplug_present(struct pcib_softc *sc) 1089 { 1090 1091 /* Card must be inserted. */ 1092 if (!pcib_hotplug_inserted(sc)) 1093 return (0); 1094 1095 /* Require the Data Link Layer to be active. */ 1096 if (!(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)) 1097 return (0); 1098 1099 return (-1); 1100 } 1101 1102 static int pci_enable_pcie_ei = 0; 1103 SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_ei, CTLFLAG_RWTUN, 1104 &pci_enable_pcie_ei, 0, 1105 "Enable support for PCI-express Electromechanical Interlock."); 1106 1107 static void 1108 pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask, 1109 bool schedule_task) 1110 { 1111 bool card_inserted, ei_engaged; 1112 1113 /* Clear DETACHING if Presence Detect has cleared. */ 1114 if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) == 1115 PCIEM_SLOT_STA_PDC) 1116 sc->flags &= ~PCIB_DETACHING; 1117 1118 card_inserted = pcib_hotplug_inserted(sc); 1119 1120 /* Turn the power indicator on if a card is inserted. */ 1121 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PIP) { 1122 mask |= PCIEM_SLOT_CTL_PIC; 1123 if (card_inserted) 1124 val |= PCIEM_SLOT_CTL_PI_ON; 1125 else if (sc->flags & PCIB_DETACH_PENDING) 1126 val |= PCIEM_SLOT_CTL_PI_BLINK; 1127 else 1128 val |= PCIEM_SLOT_CTL_PI_OFF; 1129 } 1130 1131 /* Turn the power on via the Power Controller if a card is inserted. */ 1132 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) { 1133 mask |= PCIEM_SLOT_CTL_PCC; 1134 if (card_inserted) 1135 val |= PCIEM_SLOT_CTL_PC_ON; 1136 else 1137 val |= PCIEM_SLOT_CTL_PC_OFF; 1138 } 1139 1140 /* 1141 * If a card is inserted, enable the Electromechanical 1142 * Interlock. 
If a card is not inserted (or we are in the 1143 * process of detaching), disable the Electromechanical 1144 * Interlock. 1145 */ 1146 if ((sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) && 1147 pci_enable_pcie_ei) { 1148 mask |= PCIEM_SLOT_CTL_EIC; 1149 ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0; 1150 if (card_inserted != ei_engaged) 1151 val |= PCIEM_SLOT_CTL_EIC; 1152 } 1153 1154 /* 1155 * Start a timer to see if the Data Link Layer times out. 1156 * Note that we only start the timer if Presence Detect or MRL Sensor 1157 * changed on this interrupt. Stop any scheduled timer if 1158 * the Data Link Layer is active. 1159 */ 1160 if (card_inserted && 1161 !(sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) && 1162 sc->pcie_slot_sta & 1163 (PCIEM_SLOT_STA_MRLSC | PCIEM_SLOT_STA_PDC)) { 1164 if (cold) 1165 device_printf(sc->dev, 1166 "Data Link Layer inactive\n"); 1167 else 1168 taskqueue_enqueue_timeout(taskqueue_pci_hp, 1169 &sc->pcie_dll_task, hz); 1170 } else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE) 1171 taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_dll_task, 1172 NULL); 1173 1174 pcib_pcie_hotplug_command(sc, val, mask); 1175 1176 /* 1177 * During attach the child "pci" device is added synchronously; 1178 * otherwise, the task is scheduled to manage the child 1179 * device. 1180 */ 1181 if (schedule_task && 1182 (pcib_hotplug_present(sc) != 0) != (sc->child != NULL)) 1183 taskqueue_enqueue(taskqueue_pci_hp, &sc->pcie_hp_task); 1184 } 1185 1186 static void 1187 pcib_pcie_intr_hotplug(void *arg) 1188 { 1189 struct pcib_softc *sc; 1190 device_t dev; 1191 uint16_t old_slot_sta; 1192 1193 sc = arg; 1194 dev = sc->dev; 1195 PCIB_HP_LOCK(sc); 1196 old_slot_sta = sc->pcie_slot_sta; 1197 sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); 1198 1199 /* Clear the events just reported. 
*/ 1200 pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); 1201 1202 if (bootverbose) 1203 device_printf(dev, "HotPlug interrupt: %#x\n", 1204 sc->pcie_slot_sta); 1205 1206 if (sc->pcie_slot_sta & PCIEM_SLOT_STA_ABP) { 1207 if (sc->flags & PCIB_DETACH_PENDING) { 1208 device_printf(dev, 1209 "Attention Button Pressed: Detach Cancelled\n"); 1210 sc->flags &= ~PCIB_DETACH_PENDING; 1211 taskqueue_cancel_timeout(taskqueue_pci_hp, 1212 &sc->pcie_ab_task, NULL); 1213 } else if (old_slot_sta & PCIEM_SLOT_STA_PDS) { 1214 /* Only initiate detach sequence if device present. */ 1215 device_printf(dev, 1216 "Attention Button Pressed: Detaching in 5 seconds\n"); 1217 sc->flags |= PCIB_DETACH_PENDING; 1218 taskqueue_enqueue_timeout(taskqueue_pci_hp, 1219 &sc->pcie_ab_task, 5 * hz); 1220 } 1221 } 1222 if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD) 1223 device_printf(dev, "Power Fault Detected\n"); 1224 if (sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSC) 1225 device_printf(dev, "MRL Sensor Changed to %s\n", 1226 sc->pcie_slot_sta & PCIEM_SLOT_STA_MRLSS ? "open" : 1227 "closed"); 1228 if (bootverbose && sc->pcie_slot_sta & PCIEM_SLOT_STA_PDC) 1229 device_printf(dev, "Presence Detect Changed to %s\n", 1230 sc->pcie_slot_sta & PCIEM_SLOT_STA_PDS ? "card present" : 1231 "empty"); 1232 if (sc->pcie_slot_sta & PCIEM_SLOT_STA_CC) 1233 pcib_pcie_hotplug_command_completed(sc); 1234 if (sc->pcie_slot_sta & PCIEM_SLOT_STA_DLLSC) { 1235 sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); 1236 if (bootverbose) 1237 device_printf(dev, 1238 "Data Link Layer State Changed to %s\n", 1239 sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE ? 
1240 "active" : "inactive"); 1241 } 1242 1243 pcib_pcie_hotplug_update(sc, 0, 0, true); 1244 PCIB_HP_UNLOCK(sc); 1245 } 1246 1247 static void 1248 pcib_pcie_hotplug_task(void *context, int pending) 1249 { 1250 struct pcib_softc *sc; 1251 device_t dev; 1252 1253 sc = context; 1254 PCIB_HP_LOCK(sc); 1255 dev = sc->dev; 1256 if (pcib_hotplug_present(sc) != 0) { 1257 if (sc->child == NULL) { 1258 sc->child = device_add_child(dev, "pci", -1); 1259 bus_generic_attach(dev); 1260 } 1261 } else { 1262 if (sc->child != NULL) { 1263 if (device_delete_child(dev, sc->child) == 0) 1264 sc->child = NULL; 1265 } 1266 } 1267 PCIB_HP_UNLOCK(sc); 1268 } 1269 1270 static void 1271 pcib_pcie_ab_timeout(void *arg, int pending) 1272 { 1273 struct pcib_softc *sc = arg; 1274 1275 PCIB_HP_LOCK(sc); 1276 if (sc->flags & PCIB_DETACH_PENDING) { 1277 sc->flags |= PCIB_DETACHING; 1278 sc->flags &= ~PCIB_DETACH_PENDING; 1279 pcib_pcie_hotplug_update(sc, 0, 0, true); 1280 } 1281 PCIB_HP_UNLOCK(sc); 1282 } 1283 1284 static void 1285 pcib_pcie_cc_timeout(void *arg, int pending) 1286 { 1287 struct pcib_softc *sc = arg; 1288 device_t dev = sc->dev; 1289 uint16_t sta; 1290 1291 PCIB_HP_LOCK(sc); 1292 sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); 1293 if (!(sta & PCIEM_SLOT_STA_CC)) { 1294 device_printf(dev, "HotPlug Command Timed Out\n"); 1295 sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; 1296 } else { 1297 device_printf(dev, 1298 "Missed HotPlug interrupt waiting for Command Completion\n"); 1299 pcib_pcie_intr_hotplug(sc); 1300 } 1301 PCIB_HP_UNLOCK(sc); 1302 } 1303 1304 static void 1305 pcib_pcie_dll_timeout(void *arg, int pending) 1306 { 1307 struct pcib_softc *sc = arg; 1308 device_t dev = sc->dev; 1309 uint16_t sta; 1310 1311 PCIB_HP_LOCK(sc); 1312 sta = pcie_read_config(dev, PCIER_LINK_STA, 2); 1313 if (!(sta & PCIEM_LINK_STA_DL_ACTIVE)) { 1314 device_printf(dev, 1315 "Timed out waiting for Data Link Layer Active\n"); 1316 sc->flags |= PCIB_DETACHING; 1317 pcib_pcie_hotplug_update(sc, 0, 0, true); 
1318 } else if (sta != sc->pcie_link_sta) { 1319 device_printf(dev, 1320 "Missed HotPlug interrupt waiting for DLL Active\n"); 1321 pcib_pcie_intr_hotplug(sc); 1322 } 1323 PCIB_HP_UNLOCK(sc); 1324 } 1325 1326 static int 1327 pcib_alloc_pcie_irq(struct pcib_softc *sc) 1328 { 1329 device_t dev; 1330 int count, error, mem_rid, rid; 1331 1332 rid = -1; 1333 dev = sc->dev; 1334 1335 /* 1336 * For simplicity, only use MSI-X if there is a single message. 1337 * To support a device with multiple messages we would have to 1338 * use remap intr if the MSI number is not 0. 1339 */ 1340 count = pci_msix_count(dev); 1341 if (count == 1) { 1342 mem_rid = pci_msix_table_bar(dev); 1343 sc->pcie_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 1344 &mem_rid, RF_ACTIVE); 1345 if (sc->pcie_mem == NULL) { 1346 device_printf(dev, 1347 "Failed to allocate BAR for MSI-X table\n"); 1348 } else { 1349 error = pci_alloc_msix(dev, &count); 1350 if (error == 0) 1351 rid = 1; 1352 } 1353 } 1354 1355 if (rid < 0 && pci_msi_count(dev) > 0) { 1356 count = 1; 1357 error = pci_alloc_msi(dev, &count); 1358 if (error == 0) 1359 rid = 1; 1360 } 1361 1362 if (rid < 0) 1363 rid = 0; 1364 1365 sc->pcie_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1366 RF_ACTIVE | RF_SHAREABLE); 1367 if (sc->pcie_irq == NULL) { 1368 device_printf(dev, 1369 "Failed to allocate interrupt for PCI-e events\n"); 1370 if (rid > 0) 1371 pci_release_msi(dev); 1372 return (ENXIO); 1373 } 1374 1375 error = bus_setup_intr(dev, sc->pcie_irq, INTR_TYPE_MISC|INTR_MPSAFE, 1376 NULL, pcib_pcie_intr_hotplug, sc, &sc->pcie_ihand); 1377 if (error) { 1378 device_printf(dev, "Failed to setup PCI-e interrupt handler\n"); 1379 bus_release_resource(dev, SYS_RES_IRQ, rid, sc->pcie_irq); 1380 if (rid > 0) 1381 pci_release_msi(dev); 1382 return (error); 1383 } 1384 return (0); 1385 } 1386 1387 static int 1388 pcib_release_pcie_irq(struct pcib_softc *sc) 1389 { 1390 device_t dev; 1391 int error; 1392 1393 dev = sc->dev; 1394 error = 
bus_teardown_intr(dev, sc->pcie_irq, sc->pcie_ihand); 1395 if (error) 1396 return (error); 1397 error = bus_free_resource(dev, SYS_RES_IRQ, sc->pcie_irq); 1398 if (error) 1399 return (error); 1400 error = pci_release_msi(dev); 1401 if (error) 1402 return (error); 1403 if (sc->pcie_mem != NULL) 1404 error = bus_free_resource(dev, SYS_RES_MEMORY, sc->pcie_mem); 1405 return (error); 1406 } 1407 1408 static void 1409 pcib_setup_hotplug(struct pcib_softc *sc) 1410 { 1411 device_t dev; 1412 uint16_t mask, val; 1413 1414 dev = sc->dev; 1415 TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc); 1416 TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_ab_task, 0, 1417 pcib_pcie_ab_timeout, sc); 1418 TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_cc_task, 0, 1419 pcib_pcie_cc_timeout, sc); 1420 TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_dll_task, 0, 1421 pcib_pcie_dll_timeout, sc); 1422 sc->pcie_hp_lock = bus_topo_mtx(); 1423 1424 /* Allocate IRQ. */ 1425 if (pcib_alloc_pcie_irq(sc) != 0) 1426 return; 1427 1428 sc->pcie_link_sta = pcie_read_config(dev, PCIER_LINK_STA, 2); 1429 sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2); 1430 1431 /* Clear any events previously pending. */ 1432 pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2); 1433 1434 /* Enable HotPlug events. */ 1435 mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | 1436 PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | 1437 PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; 1438 val = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | PCIEM_SLOT_CTL_PDCE; 1439 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_APB) 1440 val |= PCIEM_SLOT_CTL_ABPE; 1441 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP) 1442 val |= PCIEM_SLOT_CTL_PFDE; 1443 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_MRLSP) 1444 val |= PCIEM_SLOT_CTL_MRLSCE; 1445 if (!(sc->pcie_slot_cap & PCIEM_SLOT_CAP_NCCS)) 1446 val |= PCIEM_SLOT_CTL_CCIE; 1447 1448 /* Turn the attention indicator off. 
*/ 1449 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { 1450 mask |= PCIEM_SLOT_CTL_AIC; 1451 val |= PCIEM_SLOT_CTL_AI_OFF; 1452 } 1453 1454 pcib_pcie_hotplug_update(sc, val, mask, false); 1455 } 1456 1457 static int 1458 pcib_detach_hotplug(struct pcib_softc *sc) 1459 { 1460 uint16_t mask, val; 1461 int error; 1462 1463 /* Disable the card in the slot and force it to detach. */ 1464 if (sc->flags & PCIB_DETACH_PENDING) { 1465 sc->flags &= ~PCIB_DETACH_PENDING; 1466 taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task, 1467 NULL); 1468 } 1469 sc->flags |= PCIB_DETACHING; 1470 1471 if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) { 1472 taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, 1473 NULL); 1474 tsleep(sc, 0, "hpcmd", hz); 1475 sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING; 1476 } 1477 1478 /* Disable HotPlug events. */ 1479 mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE | 1480 PCIEM_SLOT_CTL_CCIE | PCIEM_SLOT_CTL_PDCE | PCIEM_SLOT_CTL_MRLSCE | 1481 PCIEM_SLOT_CTL_PFDE | PCIEM_SLOT_CTL_ABPE; 1482 val = 0; 1483 1484 /* Turn the attention indicator off. */ 1485 if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_AIP) { 1486 mask |= PCIEM_SLOT_CTL_AIC; 1487 val |= PCIEM_SLOT_CTL_AI_OFF; 1488 } 1489 1490 pcib_pcie_hotplug_update(sc, val, mask, false); 1491 1492 error = pcib_release_pcie_irq(sc); 1493 if (error) 1494 return (error); 1495 taskqueue_drain(taskqueue_pci_hp, &sc->pcie_hp_task); 1496 taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_ab_task); 1497 taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_cc_task); 1498 taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_dll_task); 1499 return (0); 1500 } 1501 #endif 1502 1503 /* 1504 * Get current bridge configuration. 
1505 */ 1506 static void 1507 pcib_cfg_save(struct pcib_softc *sc) 1508 { 1509 #ifndef NEW_PCIB 1510 device_t dev; 1511 uint16_t command; 1512 1513 dev = sc->dev; 1514 1515 command = pci_read_config(dev, PCIR_COMMAND, 2); 1516 if (command & PCIM_CMD_PORTEN) 1517 pcib_get_io_decode(sc); 1518 if (command & PCIM_CMD_MEMEN) 1519 pcib_get_mem_decode(sc); 1520 #endif 1521 } 1522 1523 /* 1524 * Restore previous bridge configuration. 1525 */ 1526 static void 1527 pcib_cfg_restore(struct pcib_softc *sc) 1528 { 1529 #ifndef NEW_PCIB 1530 uint16_t command; 1531 #endif 1532 1533 #ifdef NEW_PCIB 1534 pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM); 1535 #else 1536 command = pci_read_config(sc->dev, PCIR_COMMAND, 2); 1537 if (command & PCIM_CMD_PORTEN) 1538 pcib_set_io_decode(sc); 1539 if (command & PCIM_CMD_MEMEN) 1540 pcib_set_mem_decode(sc); 1541 #endif 1542 } 1543 1544 /* 1545 * Generic device interface 1546 */ 1547 static int 1548 pcib_probe(device_t dev) 1549 { 1550 if ((pci_get_class(dev) == PCIC_BRIDGE) && 1551 (pci_get_subclass(dev) == PCIS_BRIDGE_PCI)) { 1552 device_set_desc(dev, "PCI-PCI bridge"); 1553 return(-10000); 1554 } 1555 return(ENXIO); 1556 } 1557 1558 void 1559 pcib_attach_common(device_t dev) 1560 { 1561 struct pcib_softc *sc; 1562 struct sysctl_ctx_list *sctx; 1563 struct sysctl_oid *soid; 1564 int comma; 1565 1566 sc = device_get_softc(dev); 1567 sc->dev = dev; 1568 1569 /* 1570 * Get current bridge configuration. 1571 */ 1572 sc->domain = pci_get_domain(dev); 1573 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) 1574 sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1); 1575 sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); 1576 #endif 1577 sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); 1578 pcib_cfg_save(sc); 1579 1580 /* 1581 * The primary bus register should always be the bus of the 1582 * parent. 
1583 */ 1584 sc->pribus = pci_get_bus(dev); 1585 pci_write_config(dev, PCIR_PRIBUS_1, sc->pribus, 1); 1586 1587 /* 1588 * Setup sysctl reporting nodes 1589 */ 1590 sctx = device_get_sysctl_ctx(dev); 1591 soid = device_get_sysctl_tree(dev); 1592 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", 1593 CTLFLAG_RD, &sc->domain, 0, "Domain number"); 1594 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", 1595 CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); 1596 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", 1597 CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); 1598 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", 1599 CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate bus number"); 1600 1601 /* 1602 * Quirk handling. 1603 */ 1604 switch (pci_get_devid(dev)) { 1605 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) 1606 case 0x12258086: /* Intel 82454KX/GX (Orion) */ 1607 { 1608 uint8_t supbus; 1609 1610 supbus = pci_read_config(dev, 0x41, 1); 1611 if (supbus != 0xff) { 1612 sc->bus.sec = supbus + 1; 1613 sc->bus.sub = supbus + 1; 1614 } 1615 break; 1616 } 1617 #endif 1618 1619 /* 1620 * The i82380FB mobile docking controller is a PCI-PCI bridge, 1621 * and it is a subtractive bridge. However, the ProgIf is wrong 1622 * so the normal setting of PCIB_SUBTRACTIVE bit doesn't 1623 * happen. There are also Toshiba and Cavium ThunderX bridges 1624 * that behave this way. 1625 */ 1626 case 0xa002177d: /* Cavium ThunderX */ 1627 case 0x124b8086: /* Intel 82380FB Mobile */ 1628 case 0x060513d7: /* Toshiba ???? */ 1629 sc->flags |= PCIB_SUBTRACTIVE; 1630 break; 1631 1632 #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) 1633 /* Compaq R3000 BIOS sets wrong subordinate bus number. 
*/ 1634 case 0x00dd10de: 1635 { 1636 char *cp; 1637 1638 if ((cp = kern_getenv("smbios.planar.maker")) == NULL) 1639 break; 1640 if (strncmp(cp, "Compal", 6) != 0) { 1641 freeenv(cp); 1642 break; 1643 } 1644 freeenv(cp); 1645 if ((cp = kern_getenv("smbios.planar.product")) == NULL) 1646 break; 1647 if (strncmp(cp, "08A0", 4) != 0) { 1648 freeenv(cp); 1649 break; 1650 } 1651 freeenv(cp); 1652 if (sc->bus.sub < 0xa) { 1653 pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1); 1654 sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1); 1655 } 1656 break; 1657 } 1658 #endif 1659 } 1660 1661 if (pci_msi_device_blacklisted(dev)) 1662 sc->flags |= PCIB_DISABLE_MSI; 1663 1664 if (pci_msix_device_blacklisted(dev)) 1665 sc->flags |= PCIB_DISABLE_MSIX; 1666 1667 /* 1668 * Intel 815, 845 and other chipsets say they are PCI-PCI bridges, 1669 * but have a ProgIF of 0x80. The 82801 family (AA, AB, BAM/CAM, 1670 * BA/CA/DB and E) PCI bridges are HUB-PCI bridges, in Intelese. 1671 * This means they act as if they were subtractively decoding 1672 * bridges and pass all transactions. Mark them and real ProgIf 1 1673 * parts as subtractive. 
1674 */ 1675 if ((pci_get_devid(dev) & 0xff00ffff) == 0x24008086 || 1676 pci_read_config(dev, PCIR_PROGIF, 1) == PCIP_BRIDGE_PCI_SUBTRACTIVE) 1677 sc->flags |= PCIB_SUBTRACTIVE; 1678 1679 #ifdef PCI_HP 1680 pcib_probe_hotplug(sc); 1681 #endif 1682 #ifdef NEW_PCIB 1683 #ifdef PCI_RES_BUS 1684 pcib_setup_secbus(dev, &sc->bus, 1); 1685 #endif 1686 pcib_probe_windows(sc); 1687 #endif 1688 #ifdef PCI_HP 1689 if (sc->flags & PCIB_HOTPLUG) 1690 pcib_setup_hotplug(sc); 1691 #endif 1692 if (bootverbose) { 1693 device_printf(dev, " domain %d\n", sc->domain); 1694 device_printf(dev, " secondary bus %d\n", sc->bus.sec); 1695 device_printf(dev, " subordinate bus %d\n", sc->bus.sub); 1696 #ifdef NEW_PCIB 1697 if (pcib_is_window_open(&sc->io)) 1698 device_printf(dev, " I/O decode 0x%jx-0x%jx\n", 1699 (uintmax_t)sc->io.base, (uintmax_t)sc->io.limit); 1700 if (pcib_is_window_open(&sc->mem)) 1701 device_printf(dev, " memory decode 0x%jx-0x%jx\n", 1702 (uintmax_t)sc->mem.base, (uintmax_t)sc->mem.limit); 1703 if (pcib_is_window_open(&sc->pmem)) 1704 device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", 1705 (uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit); 1706 #else 1707 if (pcib_is_io_open(sc)) 1708 device_printf(dev, " I/O decode 0x%x-0x%x\n", 1709 sc->iobase, sc->iolimit); 1710 if (pcib_is_nonprefetch_open(sc)) 1711 device_printf(dev, " memory decode 0x%jx-0x%jx\n", 1712 (uintmax_t)sc->membase, (uintmax_t)sc->memlimit); 1713 if (pcib_is_prefetch_open(sc)) 1714 device_printf(dev, " prefetched decode 0x%jx-0x%jx\n", 1715 (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); 1716 #endif 1717 if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) || 1718 sc->flags & PCIB_SUBTRACTIVE) { 1719 device_printf(dev, " special decode "); 1720 comma = 0; 1721 if (sc->bridgectl & PCIB_BCR_ISA_ENABLE) { 1722 printf("ISA"); 1723 comma = 1; 1724 } 1725 if (sc->bridgectl & PCIB_BCR_VGA_ENABLE) { 1726 printf("%sVGA", comma ? 
", " : ""); 1727 comma = 1; 1728 } 1729 if (sc->flags & PCIB_SUBTRACTIVE) 1730 printf("%ssubtractive", comma ? ", " : ""); 1731 printf("\n"); 1732 } 1733 } 1734 1735 /* 1736 * Always enable busmastering on bridges so that transactions 1737 * initiated on the secondary bus are passed through to the 1738 * primary bus. 1739 */ 1740 pci_enable_busmaster(dev); 1741 } 1742 1743 #ifdef PCI_HP 1744 static int 1745 pcib_present(struct pcib_softc *sc) 1746 { 1747 1748 if (sc->flags & PCIB_HOTPLUG) 1749 return (pcib_hotplug_present(sc) != 0); 1750 return (1); 1751 } 1752 #endif 1753 1754 int 1755 pcib_attach_child(device_t dev) 1756 { 1757 struct pcib_softc *sc; 1758 1759 sc = device_get_softc(dev); 1760 if (sc->bus.sec == 0) { 1761 /* no secondary bus; we should have fixed this */ 1762 return(0); 1763 } 1764 1765 #ifdef PCI_HP 1766 if (!pcib_present(sc)) { 1767 /* An empty HotPlug slot, so don't add a PCI bus yet. */ 1768 return (0); 1769 } 1770 #endif 1771 1772 sc->child = device_add_child(dev, "pci", -1); 1773 return (bus_generic_attach(dev)); 1774 } 1775 1776 int 1777 pcib_attach(device_t dev) 1778 { 1779 1780 pcib_attach_common(dev); 1781 return (pcib_attach_child(dev)); 1782 } 1783 1784 int 1785 pcib_detach(device_t dev) 1786 { 1787 #if defined(PCI_HP) || defined(NEW_PCIB) 1788 struct pcib_softc *sc; 1789 #endif 1790 int error; 1791 1792 #if defined(PCI_HP) || defined(NEW_PCIB) 1793 sc = device_get_softc(dev); 1794 #endif 1795 error = bus_generic_detach(dev); 1796 if (error) 1797 return (error); 1798 #ifdef PCI_HP 1799 if (sc->flags & PCIB_HOTPLUG) { 1800 error = pcib_detach_hotplug(sc); 1801 if (error) 1802 return (error); 1803 } 1804 #endif 1805 error = device_delete_children(dev); 1806 if (error) 1807 return (error); 1808 #ifdef NEW_PCIB 1809 pcib_free_windows(sc); 1810 #ifdef PCI_RES_BUS 1811 pcib_free_secbus(dev, &sc->bus); 1812 #endif 1813 #endif 1814 return (0); 1815 } 1816 1817 int 1818 pcib_suspend(device_t dev) 1819 { 1820 1821 
pcib_cfg_save(device_get_softc(dev)); 1822 return (bus_generic_suspend(dev)); 1823 } 1824 1825 int 1826 pcib_resume(device_t dev) 1827 { 1828 1829 pcib_cfg_restore(device_get_softc(dev)); 1830 1831 /* 1832 * Restore the Command register only after restoring the windows. 1833 * The bridge should not be claiming random windows. 1834 */ 1835 pci_write_config(dev, PCIR_COMMAND, pci_get_cmdreg(dev), 2); 1836 return (bus_generic_resume(dev)); 1837 } 1838 1839 void 1840 pcib_bridge_init(device_t dev) 1841 { 1842 pci_write_config(dev, PCIR_IOBASEL_1, 0xff, 1); 1843 pci_write_config(dev, PCIR_IOBASEH_1, 0xffff, 2); 1844 pci_write_config(dev, PCIR_IOLIMITL_1, 0, 1); 1845 pci_write_config(dev, PCIR_IOLIMITH_1, 0, 2); 1846 pci_write_config(dev, PCIR_MEMBASE_1, 0xffff, 2); 1847 pci_write_config(dev, PCIR_MEMLIMIT_1, 0, 2); 1848 pci_write_config(dev, PCIR_PMBASEL_1, 0xffff, 2); 1849 pci_write_config(dev, PCIR_PMBASEH_1, 0xffffffff, 4); 1850 pci_write_config(dev, PCIR_PMLIMITL_1, 0, 2); 1851 pci_write_config(dev, PCIR_PMLIMITH_1, 0, 4); 1852 } 1853 1854 int 1855 pcib_child_present(device_t dev, device_t child) 1856 { 1857 #ifdef PCI_HP 1858 struct pcib_softc *sc = device_get_softc(dev); 1859 int retval; 1860 1861 retval = bus_child_present(dev); 1862 if (retval != 0 && sc->flags & PCIB_HOTPLUG) 1863 retval = pcib_hotplug_present(sc); 1864 return (retval); 1865 #else 1866 return (bus_child_present(dev)); 1867 #endif 1868 } 1869 1870 int 1871 pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) 1872 { 1873 struct pcib_softc *sc = device_get_softc(dev); 1874 1875 switch (which) { 1876 case PCIB_IVAR_DOMAIN: 1877 *result = sc->domain; 1878 return(0); 1879 case PCIB_IVAR_BUS: 1880 *result = sc->bus.sec; 1881 return(0); 1882 } 1883 return(ENOENT); 1884 } 1885 1886 int 1887 pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) 1888 { 1889 1890 switch (which) { 1891 case PCIB_IVAR_DOMAIN: 1892 return(EINVAL); 1893 case PCIB_IVAR_BUS: 1894 
return(EINVAL); 1895 } 1896 return(ENOENT); 1897 } 1898 1899 #ifdef NEW_PCIB 1900 /* 1901 * Attempt to allocate a resource from the existing resources assigned 1902 * to a window. 1903 */ 1904 static struct resource * 1905 pcib_suballoc_resource(struct pcib_softc *sc, struct pcib_window *w, 1906 device_t child, int type, int *rid, rman_res_t start, rman_res_t end, 1907 rman_res_t count, u_int flags) 1908 { 1909 struct resource *res; 1910 1911 if (!pcib_is_window_open(w)) 1912 return (NULL); 1913 1914 res = rman_reserve_resource(&w->rman, start, end, count, 1915 flags & ~RF_ACTIVE, child); 1916 if (res == NULL) 1917 return (NULL); 1918 1919 if (bootverbose) 1920 device_printf(sc->dev, 1921 "allocated %s range (%#jx-%#jx) for rid %x of %s\n", 1922 w->name, rman_get_start(res), rman_get_end(res), *rid, 1923 pcib_child_name(child)); 1924 rman_set_rid(res, *rid); 1925 1926 /* 1927 * If the resource should be active, pass that request up the 1928 * tree. This assumes the parent drivers can handle 1929 * activating sub-allocated resources. 1930 */ 1931 if (flags & RF_ACTIVE) { 1932 if (bus_activate_resource(child, type, *rid, res) != 0) { 1933 rman_release_resource(res); 1934 return (NULL); 1935 } 1936 } 1937 1938 return (res); 1939 } 1940 1941 /* Allocate a fresh resource range for an unconfigured window. */ 1942 static int 1943 pcib_alloc_new_window(struct pcib_softc *sc, struct pcib_window *w, int type, 1944 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 1945 { 1946 struct resource *res; 1947 rman_res_t base, limit, wmask; 1948 int rid; 1949 1950 /* 1951 * If this is an I/O window on a bridge with ISA enable set 1952 * and the start address is below 64k, then try to allocate an 1953 * initial window of 0x1000 bytes long starting at address 1954 * 0xf000 and walking down. Note that if the original request 1955 * was larger than the non-aliased range size of 0x100 our 1956 * caller would have raised the start address up to 64k 1957 * already. 
1958 */ 1959 if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && 1960 start < 65536) { 1961 for (base = 0xf000; (long)base >= 0; base -= 0x1000) { 1962 limit = base + 0xfff; 1963 1964 /* 1965 * Skip ranges that wouldn't work for the 1966 * original request. Note that the actual 1967 * window that overlaps are the non-alias 1968 * ranges within [base, limit], so this isn't 1969 * quite a simple comparison. 1970 */ 1971 if (start + count > limit - 0x400) 1972 continue; 1973 if (base == 0) { 1974 /* 1975 * The first open region for the window at 1976 * 0 is 0x400-0x4ff. 1977 */ 1978 if (end - count + 1 < 0x400) 1979 continue; 1980 } else { 1981 if (end - count + 1 < base) 1982 continue; 1983 } 1984 1985 if (pcib_alloc_nonisa_ranges(sc, base, limit) == 0) { 1986 w->base = base; 1987 w->limit = limit; 1988 return (0); 1989 } 1990 } 1991 return (ENOSPC); 1992 } 1993 1994 wmask = ((rman_res_t)1 << w->step) - 1; 1995 if (RF_ALIGNMENT(flags) < w->step) { 1996 flags &= ~RF_ALIGNMENT_MASK; 1997 flags |= RF_ALIGNMENT_LOG2(w->step); 1998 } 1999 start &= ~wmask; 2000 end |= wmask; 2001 count = roundup2(count, (rman_res_t)1 << w->step); 2002 rid = w->reg; 2003 res = bus_alloc_resource(sc->dev, type, &rid, start, end, count, 2004 flags & ~RF_ACTIVE); 2005 if (res == NULL) 2006 return (ENOSPC); 2007 pcib_add_window_resources(w, &res, 1); 2008 pcib_activate_window(sc, type); 2009 w->base = rman_get_start(res); 2010 w->limit = rman_get_end(res); 2011 return (0); 2012 } 2013 2014 /* Try to expand an existing window to the requested base and limit. 
*/ 2015 static int 2016 pcib_expand_window(struct pcib_softc *sc, struct pcib_window *w, int type, 2017 rman_res_t base, rman_res_t limit) 2018 { 2019 struct resource *res; 2020 int error, i, force_64k_base; 2021 2022 KASSERT(base <= w->base && limit >= w->limit, 2023 ("attempting to shrink window")); 2024 2025 /* 2026 * XXX: pcib_grow_window() doesn't try to do this anyway and 2027 * the error handling for all the edge cases would be tedious. 2028 */ 2029 KASSERT(limit == w->limit || base == w->base, 2030 ("attempting to grow both ends of a window")); 2031 2032 /* 2033 * Yet more special handling for requests to expand an I/O 2034 * window behind an ISA-enabled bridge. Since I/O windows 2035 * have to grow in 0x1000 increments and the end of the 0xffff 2036 * range is an alias, growing a window below 64k will always 2037 * result in allocating new resources and never adjusting an 2038 * existing resource. 2039 */ 2040 if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE && 2041 (limit <= 65535 || (base <= 65535 && base != w->base))) { 2042 KASSERT(limit == w->limit || limit <= 65535, 2043 ("attempting to grow both ends across 64k ISA alias")); 2044 2045 if (base != w->base) 2046 error = pcib_alloc_nonisa_ranges(sc, base, w->base - 1); 2047 else 2048 error = pcib_alloc_nonisa_ranges(sc, w->limit + 1, 2049 limit); 2050 if (error == 0) { 2051 w->base = base; 2052 w->limit = limit; 2053 } 2054 return (error); 2055 } 2056 2057 /* 2058 * Find the existing resource to adjust. Usually there is only one, 2059 * but for an ISA-enabled bridge we might be growing the I/O window 2060 * above 64k and need to find the existing resource that maps all 2061 * of the area above 64k. 
	 */
	for (i = 0; i < w->count; i++) {
		if (rman_get_end(w->res[i]) == w->limit)
			break;
	}
	KASSERT(i != w->count, ("did not find existing resource"));
	res = w->res[i];

	/*
	 * Usually the resource we found should match the window's
	 * existing range.  The one exception is the ISA-enabled case
	 * mentioned above in which case the resource should start at
	 * 64k.
	 */
	if (type == SYS_RES_IOPORT && sc->bridgectl & PCIB_BCR_ISA_ENABLE &&
	    w->base <= 65535) {
		KASSERT(rman_get_start(res) == 65536,
		    ("existing resource mismatch"));
		force_64k_base = 1;
	} else {
		KASSERT(w->base == rman_get_start(res),
		    ("existing resource mismatch"));
		force_64k_base = 0;
	}

	/*
	 * Grow the underlying resource allocated from our parent
	 * first; if the parent cannot provide the enlarged range there
	 * is no point in updating our own bookkeeping.
	 */
	error = bus_adjust_resource(sc->dev, type, res, force_64k_base ?
	    rman_get_start(res) : base, limit);
	if (error)
		return (error);

	/* Add the newly allocated region to the resource manager. */
	if (w->base != base) {
		error = rman_manage_region(&w->rman, base, w->base - 1);
		w->base = base;
	} else {
		error = rman_manage_region(&w->rman, w->limit + 1, limit);
		w->limit = limit;
	}
	if (error) {
		if (bootverbose)
			device_printf(sc->dev,
			    "failed to expand %s resource manager\n", w->name);
		/* Back out the parent resource adjustment made above. */
		(void)bus_adjust_resource(sc->dev, type, res, force_64k_base ?
		    rman_get_start(res) : w->base, w->limit);
	}
	return (error);
}

/*
 * Attempt to grow a window to make room for a given resource request.
 *
 * The (start, end, count, flags) tuple describes a request that could
 * not be satisfied from the window's current range.  On success the
 * window has been expanded and reprogrammed in hardware (via
 * pcib_write_windows()); the caller is expected to retry the
 * sub-allocation.  Returns 0 on success or an errno value on failure.
 */
static int
pcib_grow_window(struct pcib_softc *sc, struct pcib_window *w, int type,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	rman_res_t align, start_free, end_free, front, back, wmask;
	int error;

	/*
	 * Clamp the desired resource range to the maximum address
	 * this window supports.  Reject impossible requests.
	 *
	 * For I/O port requests behind a bridge with the ISA enable
	 * bit set, force large allocations to start above 64k.
	 */
	if (!w->valid)
		return (EINVAL);
	if (sc->bridgectl & PCIB_BCR_ISA_ENABLE && count > 0x100 &&
	    start < 65536)
		start = 65536;
	if (end > w->rman.rm_end)
		end = w->rman.rm_end;
	if (start + count - 1 > end || start + count < start)
		return (EINVAL);
	wmask = ((rman_res_t)1 << w->step) - 1;

	/*
	 * If there is no resource at all, just try to allocate enough
	 * aligned space for this resource.
	 */
	if (w->res == NULL) {
		error = pcib_alloc_new_window(sc, w, type, start, end, count,
		    flags);
		if (error) {
			if (bootverbose)
				device_printf(sc->dev,
		    "failed to allocate initial %s window (%#jx-%#jx,%#jx)\n",
				    w->name, start, end, count);
			return (error);
		}
		if (bootverbose)
			device_printf(sc->dev,
			    "allocated initial %s window of %#jx-%#jx\n",
			    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);
		goto updatewin;
	}

	/*
	 * See if growing the window would help.  Compute the minimum
	 * amount of address space needed on both the front and back
	 * ends of the existing window to satisfy the allocation.
	 *
	 * For each end, build a candidate region adjusting for the
	 * required alignment, etc.  If there is a free region at the
	 * edge of the window, grow from the inner edge of the free
	 * region.  Otherwise grow from the window boundary.
	 *
	 * Growing an I/O window below 64k for a bridge with the ISA
	 * enable bit doesn't require any special magic as the step
	 * size of an I/O window (1k) always includes multiple
	 * non-alias ranges when it is grown in either direction.
	 *
	 * XXX: Special case: if w->res is completely empty and the
	 * request size is larger than w->res, we should find the
	 * optimal aligned buffer containing w->res and allocate that.
	 */
	if (bootverbose)
		device_printf(sc->dev,
		    "attempting to grow %s window for (%#jx-%#jx,%#jx)\n",
		    w->name, start, end, count);
	align = (rman_res_t)1 << RF_ALIGNMENT(flags);
	if (start < w->base) {
		if (rman_first_free_region(&w->rman, &start_free, &end_free) !=
		    0 || start_free != w->base)
			end_free = w->base;
		if (end_free > end)
			end_free = end + 1;

		/* Move end_free down until it is properly aligned. */
		end_free &= ~(align - 1);
		end_free--;
		front = end_free - (count - 1);

		/*
		 * The resource would now be allocated at (front,
		 * end_free).  Ensure that fits in the (start, end)
		 * bounds.  end_free is checked above.  If 'front' is
		 * ok, ensure it is properly aligned for this window.
		 * Also check for underflow.
		 */
		if (front >= start && front <= end_free) {
			if (bootverbose)
				printf("\tfront candidate range: %#jx-%#jx\n",
				    front, end_free);
			/*
			 * Round down to the window granularity and
			 * convert 'front' into the amount of growth
			 * needed below w->base.
			 */
			front &= ~wmask;
			front = w->base - front;
		} else
			front = 0;
	} else
		front = 0;
	if (end > w->limit) {
		if (rman_last_free_region(&w->rman, &start_free, &end_free) !=
		    0 || end_free != w->limit)
			start_free = w->limit + 1;
		if (start_free < start)
			start_free = start;

		/* Move start_free up until it is properly aligned. */
		start_free = roundup2(start_free, align);
		back = start_free + count - 1;

		/*
		 * The resource would now be allocated at (start_free,
		 * back).  Ensure that fits in the (start, end)
		 * bounds.  start_free is checked above.  If 'back' is
		 * ok, ensure it is properly aligned for this window.
		 * Also check for overflow.
		 */
		if (back <= end && start_free <= back) {
			if (bootverbose)
				printf("\tback candidate range: %#jx-%#jx\n",
				    start_free, back);
			/*
			 * Round up to the window granularity and
			 * convert 'back' into the amount of growth
			 * needed above w->limit.
			 */
			back |= wmask;
			back -= w->limit;
		} else
			back = 0;
	} else
		back = 0;

	/*
	 * Try to allocate the smallest needed region first.
	 * If that fails, fall back to the other region.
	 */
	error = ENOSPC;
	while (front != 0 || back != 0) {
		if (front != 0 && (front <= back || back == 0)) {
			error = pcib_expand_window(sc, w, type, w->base - front,
			    w->limit);
			if (error == 0)
				break;
			front = 0;
		} else {
			error = pcib_expand_window(sc, w, type, w->base,
			    w->limit + back);
			if (error == 0)
				break;
			back = 0;
		}
	}

	if (error)
		return (error);
	if (bootverbose)
		device_printf(sc->dev, "grew %s window to %#jx-%#jx\n",
		    w->name, (uintmax_t)w->base, (uintmax_t)w->limit);

updatewin:
	/* Write the new window. */
	KASSERT((w->base & wmask) == 0, ("start address is not aligned"));
	KASSERT((w->limit & wmask) == wmask, ("end address is not aligned"));
	pcib_write_windows(sc, w->mask);
	return (0);
}

/*
 * We have to trap resource allocation requests and ensure that the bridge
 * is set up to, or capable of handling them.
 */
static struct resource *
pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct pcib_softc *sc;
	struct resource *r;

	sc = device_get_softc(dev);

	/*
	 * VGA resources are decoded iff the VGA enable bit is set in
	 * the bridge control register.  VGA resources do not fall into
	 * the resource windows and are passed up to the parent.
	 */
	if ((type == SYS_RES_IOPORT && pci_is_vga_ioport_range(start, end)) ||
	    (type == SYS_RES_MEMORY && pci_is_vga_memory_range(start, end))) {
		if (sc->bridgectl & PCIB_BCR_VGA_ENABLE)
			return (bus_generic_alloc_resource(dev, child, type,
			    rid, start, end, count, flags));
		else
			return (NULL);
	}

	switch (type) {
#ifdef PCI_RES_BUS
	case PCI_RES_BUS:
		return (pcib_alloc_subbus(&sc->bus, child, rid, start, end,
		    count, flags));
#endif
	case SYS_RES_IOPORT:
		/* Never sub-allocate an ISA-alias range from our window. */
		if (pcib_is_isa_range(sc, start, end, count))
			return (NULL);
		r = pcib_suballoc_resource(sc, &sc->io, child, type, rid, start,
		    end, count, flags);
		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
			break;
		if (pcib_grow_window(sc, &sc->io, type, start, end, count,
		    flags) == 0)
			r = pcib_suballoc_resource(sc, &sc->io, child, type,
			    rid, start, end, count, flags);
		break;
	case SYS_RES_MEMORY:
		/*
		 * For prefetchable resources, prefer the prefetchable
		 * memory window, but fall back to the regular memory
		 * window if that fails.  Try both windows before
		 * attempting to grow a window in case the firmware
		 * has used a range in the regular memory window to
		 * map a prefetchable BAR.
		 */
		if (flags & RF_PREFETCHABLE) {
			r = pcib_suballoc_resource(sc, &sc->pmem, child, type,
			    rid, start, end, count, flags);
			if (r != NULL)
				break;
		}
		r = pcib_suballoc_resource(sc, &sc->mem, child, type, rid,
		    start, end, count, flags);
		if (r != NULL || (sc->flags & PCIB_SUBTRACTIVE) != 0)
			break;
		if (flags & RF_PREFETCHABLE) {
			if (pcib_grow_window(sc, &sc->pmem, type, start, end,
			    count, flags) == 0) {
				r = pcib_suballoc_resource(sc, &sc->pmem, child,
				    type, rid, start, end, count, flags);
				if (r != NULL)
					break;
			}
		}
		if (pcib_grow_window(sc, &sc->mem, type, start, end, count,
		    flags & ~RF_PREFETCHABLE) == 0)
			r = pcib_suballoc_resource(sc, &sc->mem, child, type,
			    rid, start, end, count, flags);
		break;
	default:
		return (bus_generic_alloc_resource(dev, child, type, rid,
		    start, end, count, flags));
	}

	/*
	 * If attempts to suballocate from the window fail but this is a
	 * subtractive bridge, pass the request up the tree.
	 */
	if (sc->flags & PCIB_SUBTRACTIVE && r == NULL)
		return (bus_generic_alloc_resource(dev, child, type, rid,
		    start, end, count, flags));
	return (r);
}

/*
 * Adjust the range of an existing sub-allocation, growing our bus range
 * or decode window first if the new range does not fit inside it.
 */
static int
pcib_adjust_resource(device_t bus, device_t child, int type, struct resource *r,
    rman_res_t start, rman_res_t end)
{
	struct pcib_softc *sc;
	struct pcib_window *w;
	rman_res_t wmask;
	int error;

	sc = device_get_softc(bus);

	/*
	 * If the resource wasn't sub-allocated from one of our region
	 * managers then just pass the request up.
	 */
	if (!pcib_is_resource_managed(sc, type, r))
		return (bus_generic_adjust_resource(bus, child, type, r,
		    start, end));

#ifdef PCI_RES_BUS
	if (type == PCI_RES_BUS) {
		/*
		 * If our bus range isn't big enough to grow the sub-allocation
		 * then we need to grow our bus range.  Any request that would
		 * require us to decrease the start of our own bus range is
		 * invalid, we can only extend the end; ignore such requests
		 * and let rman_adjust_resource fail below.
		 */
		if (start >= sc->bus.sec && end > sc->bus.sub) {
			error = pcib_grow_subbus(&sc->bus, end);
			if (error != 0)
				return (error);
		}
	} else
#endif
	{
		/*
		 * Resource is managed and not a secondary bus number, must
		 * be from one of our windows.
		 */
		w = pcib_get_resource_window(sc, type, r);
		KASSERT(w != NULL,
		    ("%s: no window for resource (%#jx-%#jx) type %d",
		    __func__, rman_get_start(r), rman_get_end(r), type));

		/*
		 * If our window isn't big enough to grow the sub-allocation
		 * then we need to expand the window.
		 */
		if (start < w->base || end > w->limit) {
			wmask = ((rman_res_t)1 << w->step) - 1;
			error = pcib_expand_window(sc, w, type,
			    MIN(start & ~wmask, w->base),
			    MAX(end | wmask, w->limit));
			if (error != 0)
				return (error);
			if (bootverbose)
				device_printf(sc->dev,
				    "grew %s window to %#jx-%#jx\n",
				    w->name, (uintmax_t)w->base,
				    (uintmax_t)w->limit);
			pcib_write_windows(sc, w->mask);
		}
	}

	return (rman_adjust_resource(r, start, end));
}

/*
 * Release a resource, returning sub-allocations made from our windows
 * to the matching resource manager.
 */
static int
pcib_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct pcib_softc *sc;
	int error;

	sc = device_get_softc(dev);
	if (pcib_is_resource_managed(sc, type, r)) {
		/* Deactivate before handing it back to the rman. */
		if (rman_get_flags(r) & RF_ACTIVE) {
			error = bus_deactivate_resource(child, type, rid, r);
			if (error)
				return (error);
		}
		return (rman_release_resource(r));
	}
	return (bus_generic_release_resource(dev, child, type, rid, r));
}
#else
/*
 * We have to trap resource allocation requests and ensure that the bridge
 * is set up to, or capable of
handling them. 2459 */ 2460 static struct resource * 2461 pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, 2462 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 2463 { 2464 struct pcib_softc *sc = device_get_softc(dev); 2465 const char *name, *suffix; 2466 int ok; 2467 2468 /* 2469 * Fail the allocation for this range if it's not supported. 2470 */ 2471 name = device_get_nameunit(child); 2472 if (name == NULL) { 2473 name = ""; 2474 suffix = ""; 2475 } else 2476 suffix = " "; 2477 switch (type) { 2478 case SYS_RES_IOPORT: 2479 ok = 0; 2480 if (!pcib_is_io_open(sc)) 2481 break; 2482 ok = (start >= sc->iobase && end <= sc->iolimit); 2483 2484 /* 2485 * Make sure we allow access to VGA I/O addresses when the 2486 * bridge has the "VGA Enable" bit set. 2487 */ 2488 if (!ok && pci_is_vga_ioport_range(start, end)) 2489 ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0; 2490 2491 if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { 2492 if (!ok) { 2493 if (start < sc->iobase) 2494 start = sc->iobase; 2495 if (end > sc->iolimit) 2496 end = sc->iolimit; 2497 if (start < end) 2498 ok = 1; 2499 } 2500 } else { 2501 ok = 1; 2502 #if 0 2503 /* 2504 * If we overlap with the subtractive range, then 2505 * pick the upper range to use. 
2506 */ 2507 if (start < sc->iolimit && end > sc->iobase) 2508 start = sc->iolimit + 1; 2509 #endif 2510 } 2511 if (end < start) { 2512 device_printf(dev, "ioport: end (%jx) < start (%jx)\n", 2513 end, start); 2514 start = 0; 2515 end = 0; 2516 ok = 0; 2517 } 2518 if (!ok) { 2519 device_printf(dev, "%s%srequested unsupported I/O " 2520 "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n", 2521 name, suffix, start, end, sc->iobase, sc->iolimit); 2522 return (NULL); 2523 } 2524 if (bootverbose) 2525 device_printf(dev, 2526 "%s%srequested I/O range 0x%jx-0x%jx: in range\n", 2527 name, suffix, start, end); 2528 break; 2529 2530 case SYS_RES_MEMORY: 2531 ok = 0; 2532 if (pcib_is_nonprefetch_open(sc)) 2533 ok = ok || (start >= sc->membase && end <= sc->memlimit); 2534 if (pcib_is_prefetch_open(sc)) 2535 ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit); 2536 2537 /* 2538 * Make sure we allow access to VGA memory addresses when the 2539 * bridge has the "VGA Enable" bit set. 2540 */ 2541 if (!ok && pci_is_vga_memory_range(start, end)) 2542 ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 
1 : 0; 2543 2544 if ((sc->flags & PCIB_SUBTRACTIVE) == 0) { 2545 if (!ok) { 2546 ok = 1; 2547 if (flags & RF_PREFETCHABLE) { 2548 if (pcib_is_prefetch_open(sc)) { 2549 if (start < sc->pmembase) 2550 start = sc->pmembase; 2551 if (end > sc->pmemlimit) 2552 end = sc->pmemlimit; 2553 } else { 2554 ok = 0; 2555 } 2556 } else { /* non-prefetchable */ 2557 if (pcib_is_nonprefetch_open(sc)) { 2558 if (start < sc->membase) 2559 start = sc->membase; 2560 if (end > sc->memlimit) 2561 end = sc->memlimit; 2562 } else { 2563 ok = 0; 2564 } 2565 } 2566 } 2567 } else if (!ok) { 2568 ok = 1; /* subtractive bridge: always ok */ 2569 #if 0 2570 if (pcib_is_nonprefetch_open(sc)) { 2571 if (start < sc->memlimit && end > sc->membase) 2572 start = sc->memlimit + 1; 2573 } 2574 if (pcib_is_prefetch_open(sc)) { 2575 if (start < sc->pmemlimit && end > sc->pmembase) 2576 start = sc->pmemlimit + 1; 2577 } 2578 #endif 2579 } 2580 if (end < start) { 2581 device_printf(dev, "memory: end (%jx) < start (%jx)\n", 2582 end, start); 2583 start = 0; 2584 end = 0; 2585 ok = 0; 2586 } 2587 if (!ok && bootverbose) 2588 device_printf(dev, 2589 "%s%srequested unsupported memory range %#jx-%#jx " 2590 "(decoding %#jx-%#jx, %#jx-%#jx)\n", 2591 name, suffix, start, end, 2592 (uintmax_t)sc->membase, (uintmax_t)sc->memlimit, 2593 (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit); 2594 if (!ok) 2595 return (NULL); 2596 if (bootverbose) 2597 device_printf(dev,"%s%srequested memory range " 2598 "0x%jx-0x%jx: good\n", 2599 name, suffix, start, end); 2600 break; 2601 2602 default: 2603 break; 2604 } 2605 /* 2606 * Bridge is OK decoding this resource, so pass it up. 2607 */ 2608 return (bus_generic_alloc_resource(dev, child, type, rid, start, end, 2609 count, flags)); 2610 } 2611 #endif 2612 2613 /* 2614 * If ARI is enabled on this downstream port, translate the function number 2615 * to the non-ARI slot/function. The downstream port will convert it back in 2616 * hardware. 
If ARI is not enabled slot and func are not modified. 2617 */ 2618 static __inline void 2619 pcib_xlate_ari(device_t pcib, int bus, int *slot, int *func) 2620 { 2621 struct pcib_softc *sc; 2622 int ari_func; 2623 2624 sc = device_get_softc(pcib); 2625 ari_func = *func; 2626 2627 if (sc->flags & PCIB_ENABLE_ARI) { 2628 KASSERT(*slot == 0, 2629 ("Non-zero slot number with ARI enabled!")); 2630 *slot = PCIE_ARI_SLOT(ari_func); 2631 *func = PCIE_ARI_FUNC(ari_func); 2632 } 2633 } 2634 2635 static void 2636 pcib_enable_ari(struct pcib_softc *sc, uint32_t pcie_pos) 2637 { 2638 uint32_t ctl2; 2639 2640 ctl2 = pci_read_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, 4); 2641 ctl2 |= PCIEM_CTL2_ARI; 2642 pci_write_config(sc->dev, pcie_pos + PCIER_DEVICE_CTL2, ctl2, 4); 2643 2644 sc->flags |= PCIB_ENABLE_ARI; 2645 } 2646 2647 /* 2648 * PCIB interface. 2649 */ 2650 int 2651 pcib_maxslots(device_t dev) 2652 { 2653 #if !defined(__amd64__) && !defined(__i386__) 2654 uint32_t pcie_pos; 2655 uint16_t val; 2656 2657 /* 2658 * If this is a PCIe rootport or downstream switch port, there's only 2659 * one slot permitted. 
2660 */ 2661 if (pci_find_cap(dev, PCIY_EXPRESS, &pcie_pos) == 0) { 2662 val = pci_read_config(dev, pcie_pos + PCIER_FLAGS, 2); 2663 val &= PCIEM_FLAGS_TYPE; 2664 if (val == PCIEM_TYPE_ROOT_PORT || 2665 val == PCIEM_TYPE_DOWNSTREAM_PORT) 2666 return (0); 2667 } 2668 #endif 2669 return (PCI_SLOTMAX); 2670 } 2671 2672 static int 2673 pcib_ari_maxslots(device_t dev) 2674 { 2675 struct pcib_softc *sc; 2676 2677 sc = device_get_softc(dev); 2678 2679 if (sc->flags & PCIB_ENABLE_ARI) 2680 return (PCIE_ARI_SLOTMAX); 2681 else 2682 return (pcib_maxslots(dev)); 2683 } 2684 2685 static int 2686 pcib_ari_maxfuncs(device_t dev) 2687 { 2688 struct pcib_softc *sc; 2689 2690 sc = device_get_softc(dev); 2691 2692 if (sc->flags & PCIB_ENABLE_ARI) 2693 return (PCIE_ARI_FUNCMAX); 2694 else 2695 return (PCI_FUNCMAX); 2696 } 2697 2698 static void 2699 pcib_ari_decode_rid(device_t pcib, uint16_t rid, int *bus, int *slot, 2700 int *func) 2701 { 2702 struct pcib_softc *sc; 2703 2704 sc = device_get_softc(pcib); 2705 2706 *bus = PCI_RID2BUS(rid); 2707 if (sc->flags & PCIB_ENABLE_ARI) { 2708 *slot = PCIE_ARI_RID2SLOT(rid); 2709 *func = PCIE_ARI_RID2FUNC(rid); 2710 } else { 2711 *slot = PCI_RID2SLOT(rid); 2712 *func = PCI_RID2FUNC(rid); 2713 } 2714 } 2715 2716 /* 2717 * Since we are a child of a PCI bus, its parent must support the pcib interface. 
2718 */ 2719 static uint32_t 2720 pcib_read_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, int width) 2721 { 2722 #ifdef PCI_HP 2723 struct pcib_softc *sc; 2724 2725 sc = device_get_softc(dev); 2726 if (!pcib_present(sc)) { 2727 switch (width) { 2728 case 2: 2729 return (0xffff); 2730 case 1: 2731 return (0xff); 2732 default: 2733 return (0xffffffff); 2734 } 2735 } 2736 #endif 2737 pcib_xlate_ari(dev, b, &s, &f); 2738 return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, 2739 f, reg, width)); 2740 } 2741 2742 static void 2743 pcib_write_config(device_t dev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) 2744 { 2745 #ifdef PCI_HP 2746 struct pcib_softc *sc; 2747 2748 sc = device_get_softc(dev); 2749 if (!pcib_present(sc)) 2750 return; 2751 #endif 2752 pcib_xlate_ari(dev, b, &s, &f); 2753 PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, 2754 reg, val, width); 2755 } 2756 2757 /* 2758 * Route an interrupt across a PCI bridge. 2759 */ 2760 int 2761 pcib_route_interrupt(device_t pcib, device_t dev, int pin) 2762 { 2763 device_t bus; 2764 int parent_intpin; 2765 int intnum; 2766 2767 /* 2768 * 2769 * The PCI standard defines a swizzle of the child-side device/intpin to 2770 * the parent-side intpin as follows. 2771 * 2772 * device = device on child bus 2773 * child_intpin = intpin on child bus slot (0-3) 2774 * parent_intpin = intpin on parent bus slot (0-3) 2775 * 2776 * parent_intpin = (device + child_intpin) % 4 2777 */ 2778 parent_intpin = (pci_get_slot(dev) + (pin - 1)) % 4; 2779 2780 /* 2781 * Our parent is a PCI bus. Its parent must export the pcib interface 2782 * which includes the ability to route interrupts. 
2783 */ 2784 bus = device_get_parent(pcib); 2785 intnum = PCIB_ROUTE_INTERRUPT(device_get_parent(bus), pcib, parent_intpin + 1); 2786 if (PCI_INTERRUPT_VALID(intnum) && bootverbose) { 2787 device_printf(pcib, "slot %d INT%c is routed to irq %d\n", 2788 pci_get_slot(dev), 'A' + pin - 1, intnum); 2789 } 2790 return(intnum); 2791 } 2792 2793 /* Pass request to alloc MSI/MSI-X messages up to the parent bridge. */ 2794 int 2795 pcib_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) 2796 { 2797 struct pcib_softc *sc = device_get_softc(pcib); 2798 device_t bus; 2799 2800 if (sc->flags & PCIB_DISABLE_MSI) 2801 return (ENXIO); 2802 bus = device_get_parent(pcib); 2803 return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, 2804 irqs)); 2805 } 2806 2807 /* Pass request to release MSI/MSI-X messages up to the parent bridge. */ 2808 int 2809 pcib_release_msi(device_t pcib, device_t dev, int count, int *irqs) 2810 { 2811 device_t bus; 2812 2813 bus = device_get_parent(pcib); 2814 return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); 2815 } 2816 2817 /* Pass request to alloc an MSI-X message up to the parent bridge. */ 2818 int 2819 pcib_alloc_msix(device_t pcib, device_t dev, int *irq) 2820 { 2821 struct pcib_softc *sc = device_get_softc(pcib); 2822 device_t bus; 2823 2824 if (sc->flags & PCIB_DISABLE_MSIX) 2825 return (ENXIO); 2826 bus = device_get_parent(pcib); 2827 return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); 2828 } 2829 2830 /* Pass request to release an MSI-X message up to the parent bridge. */ 2831 int 2832 pcib_release_msix(device_t pcib, device_t dev, int irq) 2833 { 2834 device_t bus; 2835 2836 bus = device_get_parent(pcib); 2837 return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); 2838 } 2839 2840 /* Pass request to map MSI/MSI-X message up to parent bridge. 
*/ 2841 int 2842 pcib_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, 2843 uint32_t *data) 2844 { 2845 device_t bus; 2846 int error; 2847 2848 bus = device_get_parent(pcib); 2849 error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data); 2850 if (error) 2851 return (error); 2852 2853 pci_ht_map_msi(pcib, *addr); 2854 return (0); 2855 } 2856 2857 /* Pass request for device power state up to parent bridge. */ 2858 int 2859 pcib_power_for_sleep(device_t pcib, device_t dev, int *pstate) 2860 { 2861 device_t bus; 2862 2863 bus = device_get_parent(pcib); 2864 return (PCIB_POWER_FOR_SLEEP(bus, dev, pstate)); 2865 } 2866 2867 static int 2868 pcib_ari_enabled(device_t pcib) 2869 { 2870 struct pcib_softc *sc; 2871 2872 sc = device_get_softc(pcib); 2873 2874 return ((sc->flags & PCIB_ENABLE_ARI) != 0); 2875 } 2876 2877 static int 2878 pcib_ari_get_id(device_t pcib, device_t dev, enum pci_id_type type, 2879 uintptr_t *id) 2880 { 2881 struct pcib_softc *sc; 2882 device_t bus_dev; 2883 uint8_t bus, slot, func; 2884 2885 if (type != PCI_ID_RID) { 2886 bus_dev = device_get_parent(pcib); 2887 return (PCIB_GET_ID(device_get_parent(bus_dev), dev, type, id)); 2888 } 2889 2890 sc = device_get_softc(pcib); 2891 2892 if (sc->flags & PCIB_ENABLE_ARI) { 2893 bus = pci_get_bus(dev); 2894 func = pci_get_function(dev); 2895 2896 *id = (PCI_ARI_RID(bus, func)); 2897 } else { 2898 bus = pci_get_bus(dev); 2899 slot = pci_get_slot(dev); 2900 func = pci_get_function(dev); 2901 2902 *id = (PCI_RID(bus, slot, func)); 2903 } 2904 2905 return (0); 2906 } 2907 2908 /* 2909 * Check that the downstream port (pcib) and the endpoint device (dev) both 2910 * support ARI. If so, enable it and return 0, otherwise return an error. 
2911 */ 2912 static int 2913 pcib_try_enable_ari(device_t pcib, device_t dev) 2914 { 2915 struct pcib_softc *sc; 2916 int error; 2917 uint32_t cap2; 2918 int ari_cap_off; 2919 uint32_t ari_ver; 2920 uint32_t pcie_pos; 2921 2922 sc = device_get_softc(pcib); 2923 2924 /* 2925 * ARI is controlled in a register in the PCIe capability structure. 2926 * If the downstream port does not have the PCIe capability structure 2927 * then it does not support ARI. 2928 */ 2929 error = pci_find_cap(pcib, PCIY_EXPRESS, &pcie_pos); 2930 if (error != 0) 2931 return (ENODEV); 2932 2933 /* Check that the PCIe port advertises ARI support. */ 2934 cap2 = pci_read_config(pcib, pcie_pos + PCIER_DEVICE_CAP2, 4); 2935 if (!(cap2 & PCIEM_CAP2_ARI)) 2936 return (ENODEV); 2937 2938 /* 2939 * Check that the endpoint device advertises ARI support via the ARI 2940 * extended capability structure. 2941 */ 2942 error = pci_find_extcap(dev, PCIZ_ARI, &ari_cap_off); 2943 if (error != 0) 2944 return (ENODEV); 2945 2946 /* 2947 * Finally, check that the endpoint device supports the same version 2948 * of ARI that we do. 2949 */ 2950 ari_ver = pci_read_config(dev, ari_cap_off, 4); 2951 if (PCI_EXTCAP_VER(ari_ver) != PCIB_SUPPORTED_ARI_VER) { 2952 if (bootverbose) 2953 device_printf(pcib, 2954 "Unsupported version of ARI (%d) detected\n", 2955 PCI_EXTCAP_VER(ari_ver)); 2956 2957 return (ENXIO); 2958 } 2959 2960 pcib_enable_ari(sc, pcie_pos); 2961 2962 return (0); 2963 } 2964 2965 int 2966 pcib_request_feature_allow(device_t pcib, device_t dev, 2967 enum pci_feature feature) 2968 { 2969 /* 2970 * No host firmware we have to negotiate with, so we allow 2971 * every valid feature requested. 
2972 */ 2973 switch (feature) { 2974 case PCI_FEATURE_AER: 2975 case PCI_FEATURE_HP: 2976 break; 2977 default: 2978 return (EINVAL); 2979 } 2980 2981 return (0); 2982 } 2983 2984 int 2985 pcib_request_feature(device_t dev, enum pci_feature feature) 2986 { 2987 2988 /* 2989 * Invoke PCIB_REQUEST_FEATURE of this bridge first in case 2990 * the firmware overrides the method of PCI-PCI bridges. 2991 */ 2992 return (PCIB_REQUEST_FEATURE(dev, dev, feature)); 2993 } 2994 2995 /* 2996 * Pass the request to use this PCI feature up the tree. Either there's a 2997 * firmware like ACPI that's using this feature that will approve (or deny) the 2998 * request to take it over, or the platform has no such firmware, in which case 2999 * the request will be approved. If the request is approved, the OS is expected 3000 * to make use of the feature or render it harmless. 3001 */ 3002 static int 3003 pcib_request_feature_default(device_t pcib, device_t dev, 3004 enum pci_feature feature) 3005 { 3006 device_t bus; 3007 3008 /* 3009 * Our parent is necessarily a pci bus. Its parent will either be 3010 * another pci bridge (which passes it up) or a host bridge that can 3011 * approve or reject the request. 
3012 */ 3013 bus = device_get_parent(pcib); 3014 return (PCIB_REQUEST_FEATURE(device_get_parent(bus), dev, feature)); 3015 } 3016 3017 static int 3018 pcib_reset_child(device_t dev, device_t child, int flags) 3019 { 3020 struct pci_devinfo *pdinfo; 3021 int error; 3022 3023 error = 0; 3024 if (dev == NULL || device_get_parent(child) != dev) 3025 goto out; 3026 error = ENXIO; 3027 if (device_get_devclass(child) != devclass_find("pci")) 3028 goto out; 3029 pdinfo = device_get_ivars(dev); 3030 if (pdinfo->cfg.pcie.pcie_location != 0 && 3031 (pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT || 3032 pdinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)) { 3033 error = bus_helper_reset_prepare(child, flags); 3034 if (error == 0) { 3035 error = pcie_link_reset(dev, 3036 pdinfo->cfg.pcie.pcie_location); 3037 /* XXXKIB call _post even if error != 0 ? */ 3038 bus_helper_reset_post(child, flags); 3039 } 3040 } 3041 out: 3042 return (error); 3043 } 3044