/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI has been initialized.
 * We consider PCI uninitialized when no device has been found on
 * the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_attrs	= pcibus_dev_attrs,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		dev_warn(&dev->dev,
			 "mem unknown type %x treated as 32-bit BAR\n",
			 mem_type);
		break;
	}
	return flags;
}

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

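	/*
	 * Sizing a BAR means briefly writing all 1s to it, so turn off
	 * I/O and memory decode while we do it, unless this device must
	 * keep decoding enabled (dev->mmio_always_on).
	 */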
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		pci_write_config_word(dev, PCI_COMMAND,
			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	if (!dev->mmio_always_on)
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
				pos);
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			pcibios_bus_to_resource(dev, res, &region);
		} else {
			region.start = l64;
			region.end = l64 + sz64;
			pcibios_bus_to_resource(dev, res, &region);
			dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
				   pos, res);
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
		pcibios_bus_to_resource(dev, res, &region);

		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
	}

out:
	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
fail:
	res->flags = 0;
	goto out;
}

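/*
 * Probe the standard BARs (up to @howmany of them) and, when @rom is a
 * non-zero config-space offset, the expansion ROM BAR as well.
 */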
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void __devinit pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res, res2;

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base && base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		res2.flags = res->flags;
		region.start = base;
		region.end = limit + 0xfff;
		pcibios_bus_to_resource(dev, &res2, &region);
		if (!res->start)
			res->start = res2.start;
		if (!res->end)
			res->end = res2.end;
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

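/*
 * The prefetchable window may be 64 bits wide; when it is, the upper halves
 * of the base and limit live in PCI_PREF_{BASE,LIMIT}_UPPER32.
 */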
static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base && base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
		 child->secondary, child->subordinate,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus * pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
		INIT_LIST_HEAD(&b->slots);
		INIT_LIST_HEAD(&b->resources);
		b->max_bus_speed = PCI_SPEED_UNKNOWN;
		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	}
	return b;
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (bridge) {
		INIT_LIST_HEAD(&bridge->windows);
		bridge->bus = b;
	}

	return bridge;
}

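/*
 * Bus/link speed decode tables: pcix_bus_speed[] is indexed by the secondary
 * bus mode and frequency field of the PCI-X bridge status register;
 * pcie_link_speed[] by the 4-bit link speed field of the PCIe Link
 * Capabilities/Status registers.
 */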
static unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

static unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

out:
	return agp_speeds[index];
}


static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;
		pci_read_config_word(bridge, pos + 2, &status);

		if (status & 0x8000) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & 0x4000) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & 0x0002) {
			if (((status >> 12) & 0x3) == 2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];

		pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}


static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.  This device will get
	 * registered later in pci_bus_add_devices()
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
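	/*
	 * The subordinate number is provisional here (0xff, the maximum);
	 * pci_scan_bridge() trims it to the highest bus number actually
	 * found behind the bridge.
	 */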
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	if (!bridge)
		return child;

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;
}

struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->subordinate < max) {
		parent->subordinate = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number)) {
		dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it.  This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			child->subordinate = subordinate;
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->subordinate > max)
			max = child->subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
		}
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->secondary)   <<  8)
		      | ((unsigned int)(child->subordinate) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->subordinate > max) &&
					    (parent->subordinate <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->subordinate > bus->subordinate) ||
		    (child->number > bus->subordinate) ||
		    (child->number < bus->number) ||
		    (child->subordinate < bus->number)) {
			dev_info(&child->dev, "[bus %02x-%02x] %s "
				"hidden behind%s bridge %s [bus %02x-%02x]\n",
				child->number, child->subordinate,
				(bus->number > child->subordinate &&
				 bus->subordinate < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				bus->number, bus->subordinate);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	u32 reg32;

	pos = pci_pcie_cap(pdev);
	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	if (!(reg16 & PCI_EXP_FLAGS_SLOT))
		return;
	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	kfree(pci_dev);
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

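/*
 * Only host bridges, PCI Express devices and PCI-X Mode 2 devices are
 * assumed to implement extended (4096-byte) config space; everything else
 * gets the classic 256 bytes.
 */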
int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	pos = pci_pcie_cap(dev);
	if (!pos) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}

static void pci_release_bus_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

struct pci_dev *alloc_pci_dev(void)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);

	return dev;
}
EXPORT_SYMBOL(alloc_pci_dev);

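/*
 * Read the vendor/device ID dword for @devfn.  A device that is still
 * initializing may answer with Configuration Request Retry Status (vendor
 * ID 0x0001), in which case we retry with exponential backoff for up to
 * @crs_timeout milliseconds before giving up.
 */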
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);
	platform_pci_wakeup_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_enable_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}

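/*
 * Finish per-device setup (DMA defaults, header fixups, capability
 * initialization) and put the device on the bus's device list.  The
 * driver-core device_add() happens later, from pci_bus_add_devices().
 */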
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;
	pci_dev_get(dev);

	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);
}

struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
{
	u16 cap;
	unsigned pos, next_fn;

	if (!dev)
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + 4, &cap);
	next_fn = cap >> 8;
	if (next_fn <= fn)
		return 0;
	return next_fn;
}

static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
{
	return (fn + 1) % 8;
}

static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
{
	return 0;
}

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;
	unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	if (pci_ari_enabled(bus))
		next_fn = next_ari_fn;
	else if (dev->multifunction)
		next_fn = next_trad_fn;

	for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}

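/*
 * pci_walk_bus() callback: track the smallest Max Payload Size Supported
 * (MPSS) advertised by any device on the fabric, so the whole tree can be
 * programmed to a value every device can handle.
 */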
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/* For PCIE hotplug enabled slots not connected directly to a
	 * PCI-E root port, there can be problems when hotplugging
	 * devices.  This is due to the possibility of hotplugging a
	 * device into the fabric with a smaller MPS than the devices
	 * currently running have configured.  Modifying the MPS on the
	 * running devices could cause a fatal bus error due to an
	 * incoming frame being larger than the newly configured MPS.
	 * To work around this, the MPS for the entire fabric must be
	 * set to the minimum size.  Any devices hotplugged into this
	 * fabric will have the minimum MPS set.  If the PCI hotplug
	 * slot is directly connected to the root port and there are no
	 * other devices on the fabric (which seems to be the most
	 * common case), then this is not an issue and MPS discovery
	 * will occur as normal.
	 */
	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
	     (dev->bus->self &&
	      dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)))
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value.  If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}

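/*
 * pci_walk_bus() callback: program each device's MPS (and, in performance
 * mode, its MRRS) from the payload size selected for the fabric, then log
 * the resulting settings.
 */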
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
{
	u8 smpss;

	if (!pci_is_pcie(bus->self))
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);

unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->secondary;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		if (pci_is_root_bus(bus))
			bus->is_added = 1;
	}

	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}

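/*
 * Create and register a root bus along with its pci_host_bridge.  The
 * @resources list supplies the host bridge windows; they are moved onto
 * the new bridge and become the root bus's resources.
 */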
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;


	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_bus_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&bridge->dev);
	if (error)
		goto bridge_dev_reg_err;
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	b->number = b->secondary = bus;

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
bridge_dev_reg_err:
	kfree(bridge);
err_out:
	kfree(b);
	return NULL;
}

struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_bus *b;

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	b->subordinate = pci_scan_child_bus(b);
	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);

/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		b->subordinate = pci_scan_child_bus(b);
	else
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		b->subordinate = pci_scan_child_bus(b);
		pci_bus_add_devices(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

#ifdef CONFIG_HOTPLUG
/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resource if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#endif

static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}