/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI has been initialized.
 * Basically, we consider PCI uninitialized when there is no device
 * to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_attrs	= pcibus_dev_attrs,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and from that
	 * the extent.
	 */
	size = (size & ~(size-1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region;
	bool bar_too_big = false, bar_disabled = false;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		pci_write_config_word(dev, PCI_COMMAND,
			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			bar_too_big = true;
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			pcibios_bus_to_resource(dev, res, &region);
			bar_disabled = true;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
			pcibios_bus_to_resource(dev, res, &region);
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
		pcibios_bus_to_resource(dev, res, &region);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (!dev->mmio_always_on)
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
	if (res->flags && !bar_disabled)
		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
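
/*
 * Worked example of the sizing protocol above (illustrative values only,
 * not tied to any particular device): for a 32-bit memory BAR programmed
 * to 0xf0000000 that decodes 1 MB, writing all 1s and reading back yields
 * 0xfff00000.  pci_size() then computes mask & maxbase = 0xfff00000, the
 * lowest set bit gives the decode granularity 0x00100000, and the returned
 * extent is 0x000fffff, so the resource becomes [0xf0000000-0xf00fffff].
 */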

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
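
/*
 * Example of the I/O window decode above, with made-up register values:
 * with 4K granularity, io_base_lo = 0x50 and io_limit_lo = 0x60 give
 * base = 0x5000 and limit = 0x6000, so the forwarded window becomes
 * [io 0x5000-0x6fff] (limit + io_granularity - 1).  With the 1K
 * granularity quirk the same registers would be decoded through
 * PCI_IO_1K_RANGE_MASK instead.
 */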

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
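
/*
 * Illustration of the prefetchable window decode above (hypothetical
 * values): mem_base_lo = 0x4001 and mem_limit_lo = 0x5ff1 select the
 * 64-bit range type, giving base = 0x40000000 and limit = 0x5ff00000;
 * with PCI_PREF_BASE_UPPER32 = 0 and PCI_PREF_LIMIT_UPPER32 = 0 the
 * window decoded on a 64-bit kernel is [mem 0x40000000-0x5fffffff pref].
 */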

static struct pci_bus * pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
		INIT_LIST_HEAD(&b->slots);
		INIT_LIST_HEAD(&b->resources);
		b->max_bus_speed = PCI_SPEED_UNKNOWN;
		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	}
	return b;
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (bridge) {
		INIT_LIST_HEAD(&bridge->windows);
		bridge->bus = b;
	}

	return bridge;
}

static unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

static unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

out:
	return agp_speeds[index];
}


static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
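
/*
 * For example (purely illustrative values): a bridge whose PCI_EXP_LNKCAP
 * supported-link-speeds field reads 3 advertises 8.0 GT/s as
 * max_bus_speed, while a PCI_EXP_LNKSTA current-link-speed field of 1
 * reports a negotiated cur_bus_speed of 2.5 GT/s via the
 * pcie_link_speed[] table above.
 */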

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.  This device will get
	 * registered later in pci_bus_add_devices()
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge)
		return child;

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;
}

struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->busn_res.end < max) {
		parent->busn_res.end = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it.  This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->busn_res.end > max)
			max = child->busn_res.end;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max, 0xff);
		}
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the BIOS
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus
					 * bridges -- try to leave one
					 * valid bus number for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				"hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
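
/*
 * Layout reminder for the PCI_PRIMARY_BUS dword written above, with an
 * invented example: primary 0x01, secondary 0x02, subordinate 0x05 and a
 * secondary latency timer of 0 are blasted out as 0x00050201
 * (latency << 24 | subordinate << 16 | secondary << 8 | primary).
 */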

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->dev.type = &pci_dev_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/*
	 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	 * set this higher, assuming the system even supports it.
	 */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/*
		 * The PCI-to-PCI bridge spec requires that subtractive
		 * decoding (i.e. transparent) bridge must have programming
		 * interface code of 0x01.
		 */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
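
/*
 * Example of the class/revision decode done above, using made-up register
 * contents: a PCI_CLASS_REVISION dword of 0x06040001 yields revision 0x01
 * and dev->class 0x060400 (base class 0x06 bridge, sub-class 0x04
 * PCI-to-PCI, prog-if 0x00); the 16-bit value compared against
 * PCI_CLASS_BRIDGE_PCI in the switch is dev->class >> 8, i.e. 0x0604.
 * A prog-if of 0x01 instead would mark the bridge as subtractive-decode
 * (dev->transparent above).
 */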

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	kfree(pci_dev);
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

fail:
	return PCI_CFG_SPACE_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (!pci_is_pcie(dev)) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

fail:
	return PCI_CFG_SPACE_SIZE;
}

static void pci_release_bus_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

struct pci_dev *alloc_pci_dev(void)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);

	return dev;
}
EXPORT_SYMBOL(alloc_pci_dev);

bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
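
/*
 * Note on the CRS handling above, with an illustrative timeline: a read
 * that keeps returning the 0xffff0001 "Configuration Request Retry Status"
 * pattern is retried after 1 ms, 2 ms, 4 ms, ... with the delay doubling
 * each time, until a real vendor/device ID appears or the doubled delay
 * value exceeds crs_timeout (pci_scan_device() below passes 60*1000 ms).
 */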

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_enable_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
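
/*
 * Rough shape of the discovery path implemented in this file: a bus scan
 * calls pci_scan_single_device(), which uses pci_scan_device() above to
 * read and sanity-check config space, then pci_device_add() below to hook
 * the new pci_dev into the bus's device list; the devices only become
 * visible to drivers later, when the caller invokes pci_bus_add_devices().
 */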

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;
	pci_dev_get(dev);

	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);
}

struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
{
	u16 cap;
	unsigned pos, next_fn;

	if (!dev)
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return 0;
	pci_read_config_word(dev, pos + 4, &cap);
	next_fn = cap >> 8;
	if (next_fn <= fn)
		return 0;
	return next_fn;
}

static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
{
	return (fn + 1) % 8;
}

static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
{
	return 0;
}

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;
	unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	if (pci_ari_enabled(bus))
		next_fn = next_ari_fn;
	else if (dev->multifunction)
		next_fn = next_trad_fn;

	for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
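
/*
 * Reminder of the encoding used above, with an invented example: devfn
 * packs slot and function as (slot << 3) | fn, so slot 3 function 0 is
 * devfn 0x18.  For a plain multifunction device pci_scan_slot() probes
 * functions 1..7 via next_trad_fn(); when ARI is enabled, the chain of
 * next-function numbers from each device's ARI capability is followed
 * instead, which can address functions above 7.
 */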

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/* For PCIE hotplug enabled slots not connected directly to a
	 * PCI-E root port, there can be problems when hotplugging
	 * devices.  This is due to the possibility of hotplugging a
	 * device into the fabric with a smaller MPS than the devices
	 * currently running have configured.  Modifying the MPS on the
	 * running devices could cause a fatal bus error due to an
	 * incoming frame being larger than the newly configured MPS.
	 * To work around this, the MPS for the entire fabric must be
	 * set to the minimum size.  Any devices hotplugged into this
	 * fabric will have the minimum MPS set.  If the PCI hotplug
	 * slot is directly connected to the root port and there are no
	 * other devices on the fabric (which seems to be the most
	 * common case), then this is not an issue and MPS discovery
	 * will occur as normal.
	 */
	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
	     (dev->bus->self &&
	      pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value.  If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
{
	u8 smpss;

	if (!pci_is_pcie(bus->self))
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
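
/*
 * Typical call site (sketch only, not a call made from this file): after a
 * bus has been scanned, resource code or a host bridge driver can walk the
 * child buses and apply the MPS policy selected with pci=pcie_bus_*, e.g.:
 *
 *	struct pci_bus *child;
 *
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child, child->self->pcie_mpss);
 */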

unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		if (pci_is_root_bus(bus))
			bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}

struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;


	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_bus_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&bridge->dev);
	if (error)
		goto bridge_dev_reg_err;
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	b->number = b->busn_res.start = bus;

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
bridge_dev_reg_err:
	kfree(bridge);
err_out:
	kfree(b);
	return NULL;
}

int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = insert_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR end %s updated to %02x\n",
			&old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR %s released\n",
			res, ret ? "can not be" : "is");
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);

/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
		pci_bus_add_devices(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);
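
/*
 * Sketch of how a host bridge driver is expected to use the interfaces
 * above (the my_* names are purely illustrative): build a list of host
 * bridge windows, including a bus number resource, then hand it to
 * pci_scan_root_bus(), which creates the root bus, scans it and adds the
 * discovered devices:
 *
 *	LIST_HEAD(resources);
 *
 *	pci_add_resource(&resources, &my_io_resource);
 *	pci_add_resource(&resources, &my_mem_resource);
 *	pci_add_resource(&resources, &my_busn_resource);
 *	root = pci_scan_root_bus(dev, 0, &my_pci_ops, sysdata, &resources);
 *	if (!root)
 *		pci_free_resource_list(&resources);
 */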

/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resource if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, adds them,
 * and enables them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_enable_bridges(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}