/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we think PCI is not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_attrs	= pcibus_dev_attrs,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

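/*
 * BAR sizing, as done by __pci_read_base() below, works by writing all 1s
 * to the BAR and reading it back: the bits that stay set form the decode
 * mask.  Illustrative example: a 32-bit memory BAR that reads back as
 * 0xfff00000 has 0x00100000 as its lowest significant bit, so pci_size()
 * returns the extent 0x000fffff and the resource spans 1 MB.
 */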
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region;
	bool bar_too_big = false, bar_disabled = false;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		pci_write_config_word(dev, PCI_COMMAND,
			orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			bar_too_big = true;
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			pcibios_bus_to_resource(dev, res, &region);
			bar_disabled = true;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
			pcibios_bus_to_resource(dev, res, &region);
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
		pcibios_bus_to_resource(dev, res, &region);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (!dev->mmio_always_on)
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
	if (res->flags && !bar_disabled)
		dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

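/*
 * A 64-bit BAR occupies two consecutive 32-bit registers, which is why
 * __pci_read_base() returns 1 for a 64-bit BAR: the loop below adds that
 * return value to "pos" so the upper half is not probed as a separate BAR.
 */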
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

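/*
 * Bridge windows are coarse-grained: the I/O window is decoded in 4K
 * (or, with io_window_1k, 1K) chunks and the memory windows in 1 MB
 * chunks, so the helpers below extend the limit register value by
 * granularity - 1 to get the last decoded address.
 */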
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
		INIT_LIST_HEAD(&b->slots);
		INIT_LIST_HEAD(&b->resources);
		b->max_bus_speed = PCI_SPEED_UNKNOWN;
		b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	}
	return b;
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (bridge) {
		INIT_LIST_HEAD(&bridge->windows);
		bridge->bus = b;
	}

	return bridge;
}

static unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

static unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

out:
	return agp_speeds[index];
}

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}


static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->busn_res.end < max) {
		parent->busn_res.end = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

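/*
 * PCI_PRIMARY_BUS is a single dword: primary bus number in bits 7:0,
 * secondary in bits 15:8, subordinate in bits 23:16 and (for CardBus)
 * the secondary latency timer in bits 31:24.  pci_scan_bridge() below
 * therefore reads and rewrites all of them with one config dword access.
 */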
/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it. This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->busn_res.end > max)
			max = child->busn_res.end;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max, 0xff);
		}
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus
					 * bridges -- try to leave one
					 * valid bus number for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				 "hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		/* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	kfree(pci_dev);
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (!pci_is_pcie(dev)) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}

static void pci_release_bus_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

struct pci_dev *alloc_pci_dev(void)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;

	return dev;
}
EXPORT_SYMBOL(alloc_pci_dev);

bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
			       "responding\n", pci_domain_nr(bus),
			       bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	pci_fixup_device(pci_fixup_final, dev);
	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);

	pci_proc_attach_device(dev);
}

struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/* For PCIe hotplug-enabled slots not connected directly to a
	 * PCIe root port, there can be problems when hotplugging
	 * devices.  This is due to the possibility of hotplugging a
	 * device into the fabric with a smaller MPS than the devices
	 * currently running have configured.  Modifying the MPS on the
	 * running devices could cause a fatal bus error due to an
	 * incoming frame being larger than the newly configured MPS.
	 * To work around this, the MPS for the entire fabric must be
	 * set to the minimum size.  Any devices hotplugged into this
	 * fabric will have the minimum MPS set.  If the PCI hotplug
	 * slot is directly connected to the root port and there are no
	 * other devices on the fabric (which seems to be the most
	 * common case), then this is not an issue and MPS discovery
	 * will occur as normal.
	 */
	if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
	     (dev->bus->self &&
	      pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value.  If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
{
	u8 smpss;

	if (!pci_is_pcie(bus->self))
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);

unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}

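/*
 * Typical caller pattern (illustrative only; "my_pci_ops" and "sysdata"
 * stand in for architecture-specific values), mirroring pci_scan_bus()
 * at the end of this file:
 *
 *	LIST_HEAD(resources);
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource(&resources, &iomem_resource);
 *	pci_add_resource(&resources, &busn_resource);
 *	b = pci_create_root_bus(NULL, 0, &my_pci_ops, sysdata, &resources);
 */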
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_bus_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error)
		goto bridge_dev_reg_err;

	error = device_register(&bridge->dev);
	if (error)
		goto bridge_dev_reg_err;
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
bridge_dev_reg_err:
	kfree(bridge);
err_out:
	kfree(b);
	return NULL;
}

int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = insert_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);

/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
		pci_bus_add_devices(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resource if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_enable_bridges(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}