1 /* 2 * probe.c - PCI detection and setup code 3 */ 4 5 #include <linux/kernel.h> 6 #include <linux/delay.h> 7 #include <linux/init.h> 8 #include <linux/pci.h> 9 #include <linux/slab.h> 10 #include <linux/module.h> 11 #include <linux/cpumask.h> 12 #include <linux/aspm.h> 13 #include "pci.h" 14 15 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 16 #define CARDBUS_RESERVE_BUSNR 3 17 #define PCI_CFG_SPACE_SIZE 256 18 #define PCI_CFG_SPACE_EXP_SIZE 4096 19 20 /* Ugh. Need to stop exporting this to modules. */ 21 LIST_HEAD(pci_root_buses); 22 EXPORT_SYMBOL(pci_root_buses); 23 24 LIST_HEAD(pci_devices); 25 26 /* 27 * Some device drivers need know if pci is initiated. 28 * Basically, we think pci is not initiated when there 29 * is no device in list of pci_devices. 30 */ 31 int no_pci_devices(void) 32 { 33 return list_empty(&pci_devices); 34 } 35 36 EXPORT_SYMBOL(no_pci_devices); 37 38 #ifdef HAVE_PCI_LEGACY 39 /** 40 * pci_create_legacy_files - create legacy I/O port and memory files 41 * @b: bus to create files under 42 * 43 * Some platforms allow access to legacy I/O port and ISA memory space on 44 * a per-bus basis. 
This routine creates the files and ties them into 45 * their associated read, write and mmap files from pci-sysfs.c 46 */ 47 static void pci_create_legacy_files(struct pci_bus *b) 48 { 49 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, 50 GFP_ATOMIC); 51 if (b->legacy_io) { 52 b->legacy_io->attr.name = "legacy_io"; 53 b->legacy_io->size = 0xffff; 54 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; 55 b->legacy_io->read = pci_read_legacy_io; 56 b->legacy_io->write = pci_write_legacy_io; 57 device_create_bin_file(&b->dev, b->legacy_io); 58 59 /* Allocated above after the legacy_io struct */ 60 b->legacy_mem = b->legacy_io + 1; 61 b->legacy_mem->attr.name = "legacy_mem"; 62 b->legacy_mem->size = 1024*1024; 63 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 64 b->legacy_mem->mmap = pci_mmap_legacy_mem; 65 device_create_bin_file(&b->dev, b->legacy_mem); 66 } 67 } 68 69 void pci_remove_legacy_files(struct pci_bus *b) 70 { 71 if (b->legacy_io) { 72 device_remove_bin_file(&b->dev, b->legacy_io); 73 device_remove_bin_file(&b->dev, b->legacy_mem); 74 kfree(b->legacy_io); /* both are allocated here */ 75 } 76 } 77 #else /* !HAVE_PCI_LEGACY */ 78 static inline void pci_create_legacy_files(struct pci_bus *bus) { return; } 79 void pci_remove_legacy_files(struct pci_bus *bus) { return; } 80 #endif /* HAVE_PCI_LEGACY */ 81 82 /* 83 * PCI Bus Class Devices 84 */ 85 static ssize_t pci_bus_show_cpuaffinity(struct device *dev, 86 struct device_attribute *attr, 87 char *buf) 88 { 89 int ret; 90 cpumask_t cpumask; 91 92 cpumask = pcibus_to_cpumask(to_pci_bus(dev)); 93 ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask); 94 if (ret < PAGE_SIZE) 95 buf[ret++] = '\n'; 96 return ret; 97 } 98 DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpuaffinity, NULL); 99 100 /* 101 * PCI Bus Class 102 */ 103 static void release_pcibus_dev(struct device *dev) 104 { 105 struct pci_bus *pci_bus = to_pci_bus(dev); 106 107 if (pci_bus->bridge) 108 put_device(pci_bus->bridge); 109 kfree(pci_bus); 110 } 111 
112 static struct class pcibus_class = { 113 .name = "pci_bus", 114 .dev_release = &release_pcibus_dev, 115 }; 116 117 static int __init pcibus_class_init(void) 118 { 119 return class_register(&pcibus_class); 120 } 121 postcore_initcall(pcibus_class_init); 122 123 /* 124 * Translate the low bits of the PCI base 125 * to the resource type 126 */ 127 static inline unsigned int pci_calc_resource_flags(unsigned int flags) 128 { 129 if (flags & PCI_BASE_ADDRESS_SPACE_IO) 130 return IORESOURCE_IO; 131 132 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) 133 return IORESOURCE_MEM | IORESOURCE_PREFETCH; 134 135 return IORESOURCE_MEM; 136 } 137 138 /* 139 * Find the extent of a PCI decode.. 140 */ 141 static u32 pci_size(u32 base, u32 maxbase, u32 mask) 142 { 143 u32 size = mask & maxbase; /* Find the significant bits */ 144 if (!size) 145 return 0; 146 147 /* Get the lowest of them to find the decode size, and 148 from that the extent. */ 149 size = (size & ~(size-1)) - 1; 150 151 /* base == maxbase can be valid only if the BAR has 152 already been programmed with all 1s. */ 153 if (base == maxbase && ((base | size) & mask) != mask) 154 return 0; 155 156 return size; 157 } 158 159 static u64 pci_size64(u64 base, u64 maxbase, u64 mask) 160 { 161 u64 size = mask & maxbase; /* Find the significant bits */ 162 if (!size) 163 return 0; 164 165 /* Get the lowest of them to find the decode size, and 166 from that the extent. */ 167 size = (size & ~(size-1)) - 1; 168 169 /* base == maxbase can be valid only if the BAR has 170 already been programmed with all 1s. 
*/ 171 if (base == maxbase && ((base | size) & mask) != mask) 172 return 0; 173 174 return size; 175 } 176 177 static inline int is_64bit_memory(u32 mask) 178 { 179 if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == 180 (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) 181 return 1; 182 return 0; 183 } 184 185 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) 186 { 187 unsigned int pos, reg, next; 188 u32 l, sz; 189 struct resource *res; 190 191 for(pos=0; pos<howmany; pos = next) { 192 u64 l64; 193 u64 sz64; 194 u32 raw_sz; 195 196 next = pos+1; 197 res = &dev->resource[pos]; 198 res->name = pci_name(dev); 199 reg = PCI_BASE_ADDRESS_0 + (pos << 2); 200 pci_read_config_dword(dev, reg, &l); 201 pci_write_config_dword(dev, reg, ~0); 202 pci_read_config_dword(dev, reg, &sz); 203 pci_write_config_dword(dev, reg, l); 204 if (!sz || sz == 0xffffffff) 205 continue; 206 if (l == 0xffffffff) 207 l = 0; 208 raw_sz = sz; 209 if ((l & PCI_BASE_ADDRESS_SPACE) == 210 PCI_BASE_ADDRESS_SPACE_MEMORY) { 211 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); 212 /* 213 * For 64bit prefetchable memory sz could be 0, if the 214 * real size is bigger than 4G, so we need to check 215 * szhi for that. 
/*
 * pci_read_bases - probe and record a device's base address registers
 * @dev: device whose BARs are sized
 * @howmany: number of 32-bit BAR slots in the header (6 normal, 2 bridge,
 *	     1 cardbus)
 * @rom: config offset of the expansion ROM BAR, or 0 if the header has none
 *
 * For each BAR: save the current value, write all ones, read back the
 * decode mask, then restore the original value.  A 64-bit memory BAR
 * consumes the following slot as the upper 32 bits.  Fills in
 * dev->resource[] start/end/flags.  NOTE: temporarily disables decoding
 * of the probed BAR, so must run before the device is in active use.
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg, next;
	u32 l, sz;
	struct resource *res;

	for(pos=0; pos<howmany; pos = next) {
		u64 l64;
		u64 sz64;
		u32 raw_sz;

		next = pos+1;
		res = &dev->resource[pos];
		res->name = pci_name(dev);
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/* Classic BAR sizing: save, write ~0, read mask, restore. */
		pci_read_config_dword(dev, reg, &l);
		pci_write_config_dword(dev, reg, ~0);
		pci_read_config_dword(dev, reg, &sz);
		pci_write_config_dword(dev, reg, l);
		/* All-zeros or all-ones readback: BAR not implemented. */
		if (!sz || sz == 0xffffffff)
			continue;
		if (l == 0xffffffff)
			l = 0;
		raw_sz = sz;	/* keep the unmassaged mask for the 64-bit path */
		if ((l & PCI_BASE_ADDRESS_SPACE) ==
				PCI_BASE_ADDRESS_SPACE_MEMORY) {
			sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
			/*
			 * For 64bit prefetchable memory sz could be 0, if the
			 * real size is bigger than 4G, so we need to check
			 * szhi for that.
			 */
			if (!is_64bit_memory(l) && !sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		} else {
			/* I/O BARs decode at most 16 bits of address. */
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		}
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l);
		if (is_64bit_memory(l)) {
			u32 szhi, lhi;

			/* Size the upper half of the 64-bit BAR pair. */
			pci_read_config_dword(dev, reg+4, &lhi);
			pci_write_config_dword(dev, reg+4, ~0);
			pci_read_config_dword(dev, reg+4, &szhi);
			pci_write_config_dword(dev, reg+4, lhi);
			sz64 = ((u64)szhi << 32) | raw_sz;
			l64 = ((u64)lhi << 32) | l;
			sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
			next++;	/* the upper dword consumed a BAR slot */
#if BITS_PER_LONG == 64
			if (!sz64) {
				res->start = 0;
				res->end = 0;
				res->flags = 0;
				continue;
			}
			res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
			res->end = res->start + sz64;
#else
			if (sz64 > 0x100000000ULL) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit "
					"BAR for device %s\n", pci_name(dev));
				res->start = 0;
				res->flags = 0;
			} else if (lhi) {
				/* 64-bit wide address, treat as disabled */
				pci_write_config_dword(dev, reg,
					l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
				pci_write_config_dword(dev, reg+4, 0);
				res->start = 0;
				res->end = sz;
			}
#endif
		}
	}
	if (rom) {
		/* ROM BAR: same sizing dance, but bit 0 is the enable bit. */
		dev->rom_base_reg = rom;
		res = &dev->resource[PCI_ROM_RESOURCE];
		res->name = pci_name(dev);
		pci_read_config_dword(dev, rom, &l);
		pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
		pci_read_config_dword(dev, rom, &sz);
		pci_write_config_dword(dev, rom, l);
		if (l == 0xffffffff)
			l = 0;
		if (sz && sz != 0xffffffff) {
			sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
			if (sz) {
				res->flags = (l & IORESOURCE_ROM_ENABLE) |
				  IORESOURCE_MEM | IORESOURCE_PREFETCH |
				  IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
				res->start = l & PCI_ROM_ADDRESS_MASK;
				res->end = res->start + (unsigned long) sz;
			}
		}
	}
}
/*
 * pci_read_bridge_bases - read the I/O, memory and prefetchable-memory
 * windows of the bridge leading to @child and attach them as the bus's
 * first three resources.  Transparent bridges additionally inherit their
 * parent's resources.  Host buses (no self) are left untouched.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;
	int i;

	if (!dev)		/* It's a host bus, nothing to read */
		return;

	if (dev->transparent) {
		printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev));
		/* Subtractive decode: pass the parent's resources through. */
		for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
			child->resource[i] = child->parent->resource[i - 3];
	}

	/* resource[0] = I/O window, [1] = memory, [2] = prefetchable mem */
	for(i=0; i<3; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		/* Bridge decodes 32-bit I/O: fold in the upper 16 bits. */
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	/* base > limit means the window is disabled. */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		if (!res->start)
			res->start = base;
		if (!res->end)
			res->end = limit + 0xfff;	/* 4K I/O granularity */
	}

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->start = base;
		res->end = limit + 0xfffff;	/* 1M memory granularity */
	}

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them. If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev));
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->start = base;
		res->end = limit + 0xfffff;
	}
}

/* Allocate a zeroed pci_bus and initialise its list heads. */
static struct pci_bus * pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
	}
	return b;
}
400 */ 401 child = pci_alloc_bus(); 402 if (!child) 403 return NULL; 404 405 child->self = bridge; 406 child->parent = parent; 407 child->ops = parent->ops; 408 child->sysdata = parent->sysdata; 409 child->bus_flags = parent->bus_flags; 410 child->bridge = get_device(&bridge->dev); 411 412 /* initialize some portions of the bus device, but don't register it 413 * now as the parent is not properly set up yet. This device will get 414 * registered later in pci_bus_add_devices() 415 */ 416 child->dev.class = &pcibus_class; 417 sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr); 418 419 /* 420 * Set up the primary, secondary and subordinate 421 * bus numbers. 422 */ 423 child->number = child->secondary = busnr; 424 child->primary = parent->secondary; 425 child->subordinate = 0xff; 426 427 /* Set up default resource pointers and names.. */ 428 for (i = 0; i < 4; i++) { 429 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; 430 child->resource[i]->name = child->name; 431 } 432 bridge->subordinate = child; 433 434 return child; 435 } 436 437 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr) 438 { 439 struct pci_bus *child; 440 441 child = pci_alloc_child_bus(parent, dev, busnr); 442 if (child) { 443 down_write(&pci_bus_sem); 444 list_add_tail(&child->node, &parent->children); 445 up_write(&pci_bus_sem); 446 } 447 return child; 448 } 449 450 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) 451 { 452 struct pci_bus *parent = child->parent; 453 454 /* Attempts to fix that up are really dangerous unless 455 we're going to re-assign all bus numbers. */ 456 if (!pcibios_assign_all_busses()) 457 return; 458 459 while (parent->parent && parent->subordinate < max) { 460 parent->subordinate = max; 461 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max); 462 parent = parent->parent; 463 } 464 } 465 466 /* 467 * If it's a bridge, configure it and scan the bus behind it. 
468 * For CardBus bridges, we don't scan behind as the devices will 469 * be handled by the bridge driver itself. 470 * 471 * We need to process bridges in two passes -- first we scan those 472 * already configured by the BIOS and after we are done with all of 473 * them, we proceed to assigning numbers to the remaining buses in 474 * order to avoid overlaps between old and new bus numbers. 475 */ 476 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass) 477 { 478 struct pci_bus *child; 479 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); 480 u32 buses, i, j = 0; 481 u16 bctl; 482 483 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 484 485 pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n", 486 pci_name(dev), buses & 0xffffff, pass); 487 488 /* Disable MasterAbortMode during probing to avoid reporting 489 of bus errors (in some architectures) */ 490 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); 491 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 492 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 493 494 if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) { 495 unsigned int cmax, busnr; 496 /* 497 * Bus already configured by firmware, process it in the first 498 * pass and just note the configuration. 499 */ 500 if (pass) 501 goto out; 502 busnr = (buses >> 8) & 0xFF; 503 504 /* 505 * If we already got to this bus through a different bridge, 506 * ignore it. This can happen with the i450NX chipset. 
507 */ 508 if (pci_find_bus(pci_domain_nr(bus), busnr)) { 509 printk(KERN_INFO "PCI: Bus %04x:%02x already known\n", 510 pci_domain_nr(bus), busnr); 511 goto out; 512 } 513 514 child = pci_add_new_bus(bus, dev, busnr); 515 if (!child) 516 goto out; 517 child->primary = buses & 0xFF; 518 child->subordinate = (buses >> 16) & 0xFF; 519 child->bridge_ctl = bctl; 520 521 cmax = pci_scan_child_bus(child); 522 if (cmax > max) 523 max = cmax; 524 if (child->subordinate > max) 525 max = child->subordinate; 526 } else { 527 /* 528 * We need to assign a number to this bus which we always 529 * do in the second pass. 530 */ 531 if (!pass) { 532 if (pcibios_assign_all_busses()) 533 /* Temporarily disable forwarding of the 534 configuration cycles on all bridges in 535 this bus segment to avoid possible 536 conflicts in the second pass between two 537 bridges programmed with overlapping 538 bus ranges. */ 539 pci_write_config_dword(dev, PCI_PRIMARY_BUS, 540 buses & ~0xffffff); 541 goto out; 542 } 543 544 /* Clear errors */ 545 pci_write_config_word(dev, PCI_STATUS, 0xffff); 546 547 /* Prevent assigning a bus number that already exists. 548 * This can happen when a bridge is hot-plugged */ 549 if (pci_find_bus(pci_domain_nr(bus), max+1)) 550 goto out; 551 child = pci_add_new_bus(bus, dev, ++max); 552 buses = (buses & 0xff000000) 553 | ((unsigned int)(child->primary) << 0) 554 | ((unsigned int)(child->secondary) << 8) 555 | ((unsigned int)(child->subordinate) << 16); 556 557 /* 558 * yenta.c forces a secondary latency timer of 176. 559 * Copy that behaviour here. 560 */ 561 if (is_cardbus) { 562 buses &= ~0xff000000; 563 buses |= CARDBUS_LATENCY_TIMER << 24; 564 } 565 566 /* 567 * We need to blast all three values with a single write. 568 */ 569 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses); 570 571 if (!is_cardbus) { 572 child->bridge_ctl = bctl; 573 /* 574 * Adjust subordinate busnr in parent buses. 
575 * We do this before scanning for children because 576 * some devices may not be detected if the bios 577 * was lazy. 578 */ 579 pci_fixup_parent_subordinate_busnr(child, max); 580 /* Now we can scan all subordinate buses... */ 581 max = pci_scan_child_bus(child); 582 /* 583 * now fix it up again since we have found 584 * the real value of max. 585 */ 586 pci_fixup_parent_subordinate_busnr(child, max); 587 } else { 588 /* 589 * For CardBus bridges, we leave 4 bus numbers 590 * as cards with a PCI-to-PCI bridge can be 591 * inserted later. 592 */ 593 for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) { 594 struct pci_bus *parent = bus; 595 if (pci_find_bus(pci_domain_nr(bus), 596 max+i+1)) 597 break; 598 while (parent->parent) { 599 if ((!pcibios_assign_all_busses()) && 600 (parent->subordinate > max) && 601 (parent->subordinate <= max+i)) { 602 j = 1; 603 } 604 parent = parent->parent; 605 } 606 if (j) { 607 /* 608 * Often, there are two cardbus bridges 609 * -- try to leave one valid bus number 610 * for each one. 611 */ 612 i /= 2; 613 break; 614 } 615 } 616 max += i; 617 pci_fixup_parent_subordinate_busnr(child, max); 618 } 619 /* 620 * Set the subordinate bus number to its real value. 621 */ 622 child->subordinate = max; 623 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); 624 } 625 626 sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number); 627 628 /* Has only triggered on CardBus, fixup is in yenta_socket */ 629 while (bus->parent) { 630 if ((child->subordinate > bus->subordinate) || 631 (child->number > bus->subordinate) || 632 (child->number < bus->number) || 633 (child->subordinate < bus->number)) { 634 pr_debug("PCI: Bus #%02x (-#%02x) is %s " 635 "hidden behind%s bridge #%02x (-#%02x)\n", 636 child->number, child->subordinate, 637 (bus->number > child->subordinate && 638 bus->subordinate < child->number) ? 639 "wholly" : "partially", 640 bus->self->transparent ? 
" transparent" : "", 641 bus->number, bus->subordinate); 642 } 643 bus = bus->parent; 644 } 645 646 out: 647 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl); 648 649 return max; 650 } 651 652 /* 653 * Read interrupt line and base address registers. 654 * The architecture-dependent code can tweak these, of course. 655 */ 656 static void pci_read_irq(struct pci_dev *dev) 657 { 658 unsigned char irq; 659 660 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq); 661 dev->pin = irq; 662 if (irq) 663 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); 664 dev->irq = irq; 665 } 666 667 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 668 669 /** 670 * pci_setup_device - fill in class and map information of a device 671 * @dev: the device structure to fill 672 * 673 * Initialize the device structure with information about the device's 674 * vendor,class,memory and IO-space addresses,IRQ lines etc. 675 * Called at initialisation of the PCI subsystem and by CardBus services. 676 * Returns 0 on success and -1 if unknown type of device (not normal, bridge 677 * or CardBus). 
/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor,class,memory and IO-space addresses,IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and -1 if unknown type of device (not normal, bridge
 * or CardBus).
 */
static int pci_setup_device(struct pci_dev * dev)
{
	u32 class;

	/* Canonical name: domain:bus:slot.function */
	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));

	/* Dword at PCI_CLASS_REVISION: class[31:8], revision[7:0]. */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	class >>= 8;				    /* upper 3 bytes */
	dev->class = class;
	class >>= 8;

	pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev),
		 dev->vendor, dev->device, class, dev->hdr_type);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* Re-read: an early fixup may have changed dev->class. */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				/* Primary channel in legacy mode */
				dev->resource[0].start = 0x1F0;
				dev->resource[0].end = 0x1F7;
				dev->resource[0].flags = LEGACY_IO_RESOURCE;
				dev->resource[1].start = 0x3F6;
				dev->resource[1].end = 0x3F6;
				dev->resource[1].flags = LEGACY_IO_RESOURCE;
			}
			if ((progif & 4) == 0) {
				/* Secondary channel in legacy mode */
				dev->resource[2].start = 0x170;
				dev->resource[2].end = 0x177;
				dev->resource[2].flags = LEGACY_IO_RESOURCE;
				dev->resource[3].start = 0x376;
				dev->resource[3].end = 0x376;
				dev->resource[3].flags = LEGACY_IO_RESOURCE;
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
			pci_name(dev), dev->hdr_type);
		return -1;

	bad:
		printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
			pci_name(dev), class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
776 * @dev: device that's been disconnected 777 * 778 * Will be called only by the device core when all users of this pci device are 779 * done. 780 */ 781 static void pci_release_dev(struct device *dev) 782 { 783 struct pci_dev *pci_dev; 784 785 pci_dev = to_pci_dev(dev); 786 kfree(pci_dev); 787 } 788 789 static void set_pcie_port_type(struct pci_dev *pdev) 790 { 791 int pos; 792 u16 reg16; 793 794 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 795 if (!pos) 796 return; 797 pdev->is_pcie = 1; 798 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); 799 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 800 } 801 802 /** 803 * pci_cfg_space_size - get the configuration space size of the PCI device. 804 * @dev: PCI device 805 * 806 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices 807 * have 4096 bytes. Even if the device is capable, that doesn't mean we can 808 * access it. Maybe we don't have a way to generate extended config space 809 * accesses, or the device is behind a reverse Express bridge. So we try 810 * reading the dword at 0x100 which must either be 0 or a valid extended 811 * capability header. 
812 */ 813 int pci_cfg_space_size(struct pci_dev *dev) 814 { 815 int pos; 816 u32 status; 817 818 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 819 if (!pos) { 820 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 821 if (!pos) 822 goto fail; 823 824 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); 825 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))) 826 goto fail; 827 } 828 829 if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) 830 goto fail; 831 if (status == 0xffffffff) 832 goto fail; 833 834 return PCI_CFG_SPACE_EXP_SIZE; 835 836 fail: 837 return PCI_CFG_SPACE_SIZE; 838 } 839 840 static void pci_release_bus_bridge_dev(struct device *dev) 841 { 842 kfree(dev); 843 } 844 845 struct pci_dev *alloc_pci_dev(void) 846 { 847 struct pci_dev *dev; 848 849 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); 850 if (!dev) 851 return NULL; 852 853 INIT_LIST_HEAD(&dev->global_list); 854 INIT_LIST_HEAD(&dev->bus_list); 855 856 pci_msi_init_pci_dev(dev); 857 858 return dev; 859 } 860 EXPORT_SYMBOL(alloc_pci_dev); 861 862 /* 863 * Read the config data for a PCI device, sanity-check it 864 * and fill in the dev structure... 865 */ 866 static struct pci_dev * __devinit 867 pci_scan_device(struct pci_bus *bus, int devfn) 868 { 869 struct pci_dev *dev; 870 u32 l; 871 u8 hdr_type; 872 int delay = 1; 873 874 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) 875 return NULL; 876 877 /* some broken boards return 0 or ~0 if a slot is empty: */ 878 if (l == 0xffffffff || l == 0x00000000 || 879 l == 0x0000ffff || l == 0xffff0000) 880 return NULL; 881 882 /* Configuration request Retry Status */ 883 while (l == 0xffff0001) { 884 msleep(delay); 885 delay *= 2; 886 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l)) 887 return NULL; 888 /* Card hasn't responded in 60 seconds? Must be stuck. 
/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 *
 * Returns the new pci_dev (not yet added to any list), or NULL if the
 * slot is empty, the device never comes out of Configuration Request
 * Retry, or setup fails.
 */
static struct pci_dev * __devinit
pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;
	u8 hdr_type;
	int delay = 1;	/* CRS backoff, doubles each retry (ms) */

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
		return NULL;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (l == 0xffffffff || l == 0x00000000 ||
	    l == 0x0000ffff || l == 0xffff0000)
		return NULL;

	/* Configuration request Retry Status */
	while (l == 0xffff0001) {
		msleep(delay);
		delay *= 2;	/* exponential backoff */
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
			return NULL;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > 60 * 1000) {
			printk(KERN_WARNING "Device %04x:%02x:%02x.%d not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return NULL;
		}
	}

	if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
		return NULL;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->sysdata = bus->sysdata;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->hdr_type = hdr_type & 0x7f;	/* low 7 bits: header layout */
	dev->multifunction = !!(hdr_type & 0x80); /* bit 7: multi-function */
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;
	dev->cfg_size = pci_cfg_space_size(dev);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;
	if (pci_setup_device(dev) < 0) {
		kfree(dev);
		return NULL;
	}

	return dev;
}
945 */ 946 INIT_LIST_HEAD(&dev->global_list); 947 down_write(&pci_bus_sem); 948 list_add_tail(&dev->bus_list, &bus->devices); 949 up_write(&pci_bus_sem); 950 } 951 952 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) 953 { 954 struct pci_dev *dev; 955 956 dev = pci_scan_device(bus, devfn); 957 if (!dev) 958 return NULL; 959 960 pci_device_add(dev, bus); 961 962 return dev; 963 } 964 EXPORT_SYMBOL(pci_scan_single_device); 965 966 /** 967 * pci_scan_slot - scan a PCI slot on a bus for devices. 968 * @bus: PCI bus to scan 969 * @devfn: slot number to scan (must have zero function.) 970 * 971 * Scan a PCI slot on the specified PCI bus for devices, adding 972 * discovered devices to the @bus->devices list. New devices 973 * will have an empty dev->global_list head. 974 */ 975 int pci_scan_slot(struct pci_bus *bus, int devfn) 976 { 977 int func, nr = 0; 978 int scan_all_fns; 979 980 scan_all_fns = pcibios_scan_all_fns(bus, devfn); 981 982 for (func = 0; func < 8; func++, devfn++) { 983 struct pci_dev *dev; 984 985 dev = pci_scan_single_device(bus, devfn); 986 if (dev) { 987 nr++; 988 989 /* 990 * If this is a single function device, 991 * don't scan past the first function. 992 */ 993 if (!dev->multifunction) { 994 if (func > 0) { 995 dev->multifunction = 1; 996 } else { 997 break; 998 } 999 } 1000 } else { 1001 if (func == 0 && !scan_all_fns) 1002 break; 1003 } 1004 } 1005 1006 if (bus->self) 1007 pcie_aspm_init_link_state(bus->self); 1008 1009 return nr; 1010 } 1011 1012 unsigned int pci_scan_child_bus(struct pci_bus *bus) 1013 { 1014 unsigned int devfn, pass, max = bus->secondary; 1015 struct pci_dev *dev; 1016 1017 pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number); 1018 1019 /* Go find them, Rover! */ 1020 for (devfn = 0; devfn < 0x100; devfn += 8) 1021 pci_scan_slot(bus, devfn); 1022 1023 /* 1024 * After performing arch-dependent fixup of the bus, look behind 1025 * all PCI-to-PCI bridges on this bus. 
/*
 * Scan all slots on @bus, then walk behind every PCI-PCI and CardBus
 * bridge found, in two passes (firmware-configured bridges first).
 * Returns the highest subordinate bus number reached.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->secondary;
	struct pci_dev *dev;

	pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
	pcibios_fixup_bus(bus);
	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
		pci_domain_nr(bus), bus->number, max);
	return max;
}

/*
 * Scan @bus and then register everything found with the driver core.
 * Returns the highest subordinate bus number reached.
 */
unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);

	/*
	 * Make the discovered devices available.
	 */
	pci_bus_add_devices(bus);

	return max;
}

/*
 * pci_create_bus - create and register a root PCI bus
 * @parent: parent device for the bus's bridge device (may be NULL)
 * @bus: bus number
 * @ops: config-space access ops for the bus
 * @sysdata: arch-private data attached to the bus
 *
 * Allocates the pci_bus and a placeholder bridge device, registers both,
 * and creates the sysfs files.  On any failure, unwinds everything done
 * so far via the goto chain and returns NULL.
 */
struct pci_bus * pci_create_bus(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	int error;
	struct pci_bus *b;
	struct device *dev;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev){
		kfree(b);
		return NULL;
	}

	b->sysdata = sysdata;
	b->ops = ops;

	if (pci_find_bus(pci_domain_nr(b), bus)) {
		/* If we already got to this bus through a different bridge, ignore it */
		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
		goto err_out;
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	/* Placeholder device representing the host bridge. */
	memset(dev, 0, sizeof(*dev));
	dev->parent = parent;
	dev->release = pci_release_bus_bridge_dev;
	sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(dev);
	if (error)
		goto dev_reg_err;
	b->bridge = get_device(dev);

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;
	error = device_create_file(&b->dev, &dev_attr_cpuaffinity);
	if (error)
		goto dev_create_file_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	b->number = b->secondary = bus;
	b->resource[0] = &ioport_resource;
	b->resource[1] = &iomem_resource;

	return b;

dev_create_file_err:
	device_unregister(&b->dev);
class_dev_reg_err:
	device_unregister(dev);
dev_reg_err:
	down_write(&pci_bus_sem);
	list_del(&b->node);
	up_write(&pci_bus_sem);
err_out:
	kfree(dev);
	kfree(b);
	return NULL;
}

struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	struct pci_bus *b;

	b = pci_create_bus(parent, bus, ops, sysdata);
	if (b)
		b->subordinate = pci_scan_child_bus(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_do_scan_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#endif

/*
 * Comparison callback for breadth-first device ordering:
 * domain, then bus number, then devfn.
 */
static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b)
{
	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

/*
 * Yes, this forcably breaks the
 * klist abstraction temporarily. It
 * just wants to sort the klist, not change reference counts and
 * take/drop locks rapidly in the process. It does all this while
 * holding the lock for the list, so objects can't otherwise be
 * added/removed while we're swizzling.
 */
/*
 * Insert @a into the (already-sorted) node list @list, keeping it ordered
 * by pci_sort_bf_cmp().  Operates directly on the klist's raw list_head
 * nodes; the caller must hold the klist's k_lock.
 */
static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
{
	struct list_head *pos;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *b;

	list_for_each(pos, list) {
		/* recover the pci_dev from the raw klist node */
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		b = to_pci_dev(dev);
		if (pci_sort_bf_cmp(a, b) <= 0) {
			/* list_move_tail(entry, head) links entry just before
			 * head, so a lands immediately in front of the first
			 * b that compares >= a. */
			list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
			return;
		}
	}
	/* a sorts after everything already placed: append at the end */
	list_move_tail(&a->dev.knode_bus.n_node, list);
}

/*
 * Re-sort the pci_bus_type device klist into breadth-first order by
 * insertion-sorting every node onto a temporary list and splicing the
 * result back, all under the klist's spinlock.
 */
static void __init pci_sort_breadthfirst_klist(void)
{
	LIST_HEAD(sorted_devices);
	struct list_head *pos, *tmp;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *pdev;
	struct klist *device_klist;

	device_klist = bus_get_device_klist(&pci_bus_type);

	spin_lock(&device_klist->k_lock);
	/* _safe: each iteration moves the current node off this list */
	list_for_each_safe(pos, tmp, &device_klist->k_list) {
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		pdev = to_pci_dev(dev);
		pci_insertion_sort_klist(pdev, &sorted_devices);
	}
	/* put the now-sorted nodes back onto the klist */
	list_splice(&sorted_devices, &device_klist->k_list);
	spin_unlock(&device_klist->k_lock);
}

/*
 * Insert @a into the sorted list @list via its global_list linkage,
 * ordered by pci_sort_bf_cmp().  Same scheme as the klist variant above,
 * but for the file-global pci_devices list.
 */
static void __init pci_insertion_sort_devices(struct pci_dev *a, struct list_head *list)
{
	struct pci_dev *b;

	list_for_each_entry(b, list, global_list) {
		if (pci_sort_bf_cmp(a, b) <= 0) {
			/* place a immediately before the first b >= a */
			list_move_tail(&a->global_list, &b->global_list);
			return;
		}
	}
	/* a sorts after everything already placed: append at the end */
	list_move_tail(&a->global_list, list);
}

/*
 * Re-sort the global pci_devices list into breadth-first order under
 * the pci_bus_sem write lock.
 */
static void __init pci_sort_breadthfirst_devices(void)
{
	LIST_HEAD(sorted_devices);
	struct pci_dev *dev, *tmp;

	down_write(&pci_bus_sem);
	/* _safe: each iteration moves the current entry off this list */
	list_for_each_entry_safe(dev, tmp, &pci_devices, global_list) {
		pci_insertion_sort_devices(dev, &sorted_devices);
	}
	list_splice(&sorted_devices, &pci_devices);
	up_write(&pci_bus_sem);
}

/*
 * Sort both device lists (the global pci_devices list and the driver-core
 * bus klist) into breadth-first order so they stay consistent with each
 * other.
 */
void __init pci_sort_breadthfirst(void)
{
	pci_sort_breadthfirst_devices();
	pci_sort_breadthfirst_klist();
}