/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3
#define PCI_CFG_SPACE_SIZE	256
#define PCI_CFG_SPACE_EXP_SIZE	4096

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

LIST_HEAD(pci_devices);

#ifdef HAVE_PCI_LEGACY
/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 */
static void pci_create_legacy_files(struct pci_bus *b)
{
	b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
			       GFP_ATOMIC);
	if (b->legacy_io) {
		b->legacy_io->attr.name = "legacy_io";
		b->legacy_io->size = 0xffff;
		b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
		b->legacy_io->attr.owner = THIS_MODULE;
		b->legacy_io->read = pci_read_legacy_io;
		b->legacy_io->write = pci_write_legacy_io;
		class_device_create_bin_file(&b->class_dev, b->legacy_io);

		/* Allocated above after the legacy_io struct */
		b->legacy_mem = b->legacy_io + 1;
		b->legacy_mem->attr.name = "legacy_mem";
		b->legacy_mem->size = 1024*1024;
		b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
		b->legacy_mem->attr.owner = THIS_MODULE;
		b->legacy_mem->mmap = pci_mmap_legacy_mem;
		class_device_create_bin_file(&b->class_dev, b->legacy_mem);
	}
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		class_device_remove_bin_file(&b->class_dev, b->legacy_io);
		class_device_remove_bin_file(&b->class_dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#else /* !HAVE_PCI_LEGACY */
static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
void pci_remove_legacy_files(struct pci_bus *bus) { return; }
#endif /* HAVE_PCI_LEGACY */

/*
 * PCI Bus Class Devices
 */
static ssize_t pci_bus_show_cpuaffinity(struct class_device *class_dev,
					char *buf)
{
	int ret;
	cpumask_t cpumask;

	cpumask = pcibus_to_cpumask(to_pci_bus(class_dev));
	ret = cpumask_scnprintf(buf, PAGE_SIZE, cpumask);
	if (ret < PAGE_SIZE)
		buf[ret++] = '\n';
	return ret;
}
CLASS_DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpuaffinity, NULL);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct class_device *class_dev)
{
	struct pci_bus *pci_bus = to_pci_bus(class_dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.release	= &release_pcibus_dev,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

/*
 * Translate the low bits of the PCI base
 * to the resource type
 */
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
	if (flags & PCI_BASE_ADDRESS_SPACE_IO)
		return IORESOURCE_IO;

	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		return IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return IORESOURCE_MEM;
}
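
/*
 * Illustrative note (added commentary, not from the original source):
 * pci_size() below recovers a BAR's decode size from the value read back
 * after writing all 1s to the register.  A worked example, assuming a 4K
 * non-prefetchable memory BAR:
 *
 *   l    (original BAR value)       = 0xfed00000
 *   sz   (read back after ~0 write) = 0xfffff000
 *   mask                            = PCI_BASE_ADDRESS_MEM_MASK
 *   size = mask & sz                = 0xfffff000
 *   size = (size & ~(size-1)) - 1   = 0x00000fff   (lowest set bit, minus 1)
 *
 * so pci_read_bases() ends up with res->start .. res->start + 0xfff,
 * i.e. a 4K window.
 */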

/*
 * Find the extent of a PCI decode..
 */
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
{
	u32 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg, next;
	u32 l, sz;
	struct resource *res;

	for(pos=0; pos<howmany; pos = next) {
		next = pos+1;
		res = &dev->resource[pos];
		res->name = pci_name(dev);
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pci_read_config_dword(dev, reg, &l);
		pci_write_config_dword(dev, reg, ~0);
		pci_read_config_dword(dev, reg, &sz);
		pci_write_config_dword(dev, reg, l);
		if (!sz || sz == 0xffffffff)
			continue;
		if (l == 0xffffffff)
			l = 0;
		if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
			sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		} else {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		}
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l);
		if ((l & (PCI_BASE_ADDRESS_SPACE | PCI_BASE_ADDRESS_MEM_TYPE_MASK))
		    == (PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64)) {
			u32 szhi, lhi;
			pci_read_config_dword(dev, reg+4, &lhi);
			pci_write_config_dword(dev, reg+4, ~0);
			pci_read_config_dword(dev, reg+4, &szhi);
			pci_write_config_dword(dev, reg+4, lhi);
			szhi = pci_size(lhi, szhi, 0xffffffff);
			next++;
#if BITS_PER_LONG == 64
			res->start |= ((unsigned long) lhi) << 32;
			res->end = res->start + sz;
			if (szhi) {
				/* This BAR needs > 4GB?  Wow. */
				res->end |= (unsigned long)szhi<<32;
			}
#else
			if (szhi) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit BAR for device %s\n", pci_name(dev));
				res->start = 0;
				res->flags = 0;
			} else if (lhi) {
				/* 64-bit wide address, treat as disabled */
				pci_write_config_dword(dev, reg, l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
				pci_write_config_dword(dev, reg+4, 0);
				res->start = 0;
				res->end = sz;
			}
#endif
		}
	}
	if (rom) {
		dev->rom_base_reg = rom;
		res = &dev->resource[PCI_ROM_RESOURCE];
		res->name = pci_name(dev);
		pci_read_config_dword(dev, rom, &l);
		pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
		pci_read_config_dword(dev, rom, &sz);
		pci_write_config_dword(dev, rom, l);
		if (l == 0xffffffff)
			l = 0;
		if (sz && sz != 0xffffffff) {
			sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
			if (sz) {
				res->flags = (l & IORESOURCE_ROM_ENABLE) |
					IORESOURCE_MEM | IORESOURCE_PREFETCH |
					IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
				res->start = l & PCI_ROM_ADDRESS_MASK;
				res->end = res->start + (unsigned long) sz;
			}
		}
	}
}
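
/*
 * Illustrative note (added commentary, not from the original source): the
 * bridge window registers read by pci_read_bridge_bases() below encode
 * coarse-grained ranges.  For the I/O window only the upper nibble of
 * PCI_IO_BASE/PCI_IO_LIMIT is significant and the granularity is 4K; with
 * example values (assumed here, not from real hardware):
 *
 *   io_base_lo  = 0x21  ->  base  = (0x21 & 0xf0) << 8 = 0x2000
 *   io_limit_lo = 0x31  ->  limit = (0x31 & 0xf0) << 8 = 0x3000
 *
 * the window ends at limit + 0xfff = 0x3fff.  The memory windows work the
 * same way with 1M granularity, hence the "+ 0xfffff" below.
 */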

void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;
	int i;

	if (!dev)		/* It's a host bus, nothing to read */
		return;

	if (dev->transparent) {
		printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev));
		for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
			child->resource[i] = child->parent->resource[i - 3];
	}

	for(i=0; i<3; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		if (!res->start)
			res->start = base;
		if (!res->end)
			res->end = limit + 0xfff;
	}

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->start = base;
		res->end = limit + 0xfffff;
	}

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev));
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->start = base;
		res->end = limit + 0xfffff;
	}
}

static struct pci_bus * __devinit pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
	}
	return b;
}

static struct pci_bus * __devinit
pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int retval;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->self = bridge;
	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;
	child->bridge = get_device(&bridge->dev);

	child->class_dev.class = &pcibus_class;
	sprintf(child->class_dev.class_id, "%04x:%02x", pci_domain_nr(child), busnr);
	retval = class_device_register(&child->class_dev);
	if (retval)
		goto error_register;
	retval = class_device_create_file(&child->class_dev,
					  &class_device_attr_cpuaffinity);
	if (retval)
		goto error_file_create;

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	/* Set up default resource pointers and names.. */
	for (i = 0; i < 4; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;

error_file_create:
	class_device_unregister(&child->class_dev);
error_register:
	kfree(child);
	return NULL;
}

struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_enable_crs(struct pci_dev *dev)
{
	u16 cap, rpctl;
	int rpcap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!rpcap)
		return;

	pci_read_config_word(dev, rpcap + PCI_CAP_FLAGS, &cap);
	if (((cap & PCI_EXP_FLAGS_TYPE) >> 4) != PCI_EXP_TYPE_ROOT_PORT)
		return;

	pci_read_config_word(dev, rpcap + PCI_EXP_RTCTL, &rpctl);
	rpctl |= PCI_EXP_RTCTL_CRSSVE;
	pci_write_config_word(dev, rpcap + PCI_EXP_RTCTL, rpctl);
}

static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->subordinate < max) {
		parent->subordinate = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}
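
/*
 * Illustrative note (added commentary, not from the original source):
 * pci_fixup_parent_subordinate_busnr() above propagates a new highest bus
 * number up the tree.  If a bridge on bus 1 ends up with children reaching
 * bus 5, every bridge between it and the root must advertise a subordinate
 * bus number of at least 5, otherwise configuration cycles for buses 2-5
 * would never be forwarded down this branch.
 */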

unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus);

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);

	pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n",
		 pci_name(dev), buses & 0xffffff, pass);

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
		unsigned int cmax, busnr;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;
		busnr = (buses >> 8) & 0xFF;

		/*
		 * If we already got to this bus through a different bridge,
		 * ignore it.  This can happen with the i450NX chipset.
		 */
		if (pci_find_bus(pci_domain_nr(bus), busnr)) {
			printk(KERN_INFO "PCI: Bus %04x:%02x already known\n",
			       pci_domain_nr(bus), busnr);
			goto out;
		}

		child = pci_add_new_bus(bus, dev, busnr);
		if (!child)
			goto out;
		child->primary = buses & 0xFF;
		child->subordinate = (buses >> 16) & 0xFF;
		child->bridge_ctl = bctl;

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->subordinate > max)
			max = child->subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses())
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged */
		if (pci_find_bus(pci_domain_nr(bus), max+1))
			goto out;
		child = pci_add_new_bus(bus, dev, ++max);
		if (!child)	/* out of memory, nothing more we can do here */
			goto out;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->secondary)   <<  8)
		      | ((unsigned int)(child->subordinate) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}
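
		/*
		 * Illustrative note (added commentary, not from the original
		 * source): the dword written to PCI_PRIMARY_BUS below packs
		 * four byte-wide registers:
		 *
		 *   bits  7:0   primary bus number
		 *   bits 15:8   secondary bus number
		 *   bits 23:16  subordinate bus number
		 *   bits 31:24  secondary latency timer (CardBus only here)
		 *
		 * which is why the CardBus path above masks off the top byte
		 * before or-ing in CARDBUS_LATENCY_TIMER << 24.
		 */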

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl | PCI_BRIDGE_CTL_NO_ISA;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the BIOS
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->subordinate > max) &&
					    (parent->subordinate <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);

	while (bus->parent) {
		if ((child->subordinate > bus->subordinate) ||
		    (child->number > bus->subordinate) ||
		    (child->number < bus->number) ||
		    (child->subordinate < bus->number)) {
			printk(KERN_WARNING "PCI: Bus #%02x (-#%02x) is "
				"hidden behind%s bridge #%02x (-#%02x)%s\n",
				child->number, child->subordinate,
				bus->self->transparent ? " transparent" : " ",
				bus->number, bus->subordinate,
				pcibios_assign_all_busses() ? " " :
				" (try 'pci=assign-busses')");
			printk(KERN_WARNING "Please report the result to "
				"linux-kernel to fix this permanently\n");
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and -1 if unknown type of device (not normal, bridge
 * or CardBus).
 */
static int pci_setup_device(struct pci_dev * dev)
{
	u32 class;

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	class >>= 8;				    /* upper 3 bytes */
	dev->class = class;
	class >>= 8;

	pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev),
		 dev->vendor, dev->device, class, dev->hdr_type);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				dev->resource[0].start = 0x1F0;
				dev->resource[0].end = 0x1F7;
				dev->resource[0].flags = IORESOURCE_IO;
				dev->resource[1].start = 0x3F6;
				dev->resource[1].end = 0x3F6;
				dev->resource[1].flags = IORESOURCE_IO;
			}
			if ((progif & 4) == 0) {
				dev->resource[2].start = 0x170;
				dev->resource[2].end = 0x177;
				dev->resource[2].flags = IORESOURCE_IO;
				dev->resource[3].start = 0x376;
				dev->resource[3].end = 0x376;
				dev->resource[3].flags = IORESOURCE_IO;
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
			pci_name(dev), dev->hdr_type);
		return -1;

	bad:
		printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
		       pci_name(dev), class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	kfree(pci_dev);
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

static void pci_release_bus_bridge_dev(struct device *dev)
{
	kfree(dev);
}

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev * __devinit
pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;
	u8 hdr_type;
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
		return NULL;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (l == 0xffffffff || l == 0x00000000 ||
	    l == 0x0000ffff || l == 0xffff0000)
		return NULL;

	/* Configuration request Retry Status */
	while (l == 0xffff0001) {
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
			return NULL;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > 60 * 1000) {
			printk(KERN_WARNING "Device %04x:%02x:%02x.%d not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return NULL;
		}
	}

	if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
		return NULL;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->sysdata = bus->sysdata;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;
	dev->cfg_size = pci_cfg_space_size(dev);
	dev->error_state = pci_channel_io_normal;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;
	if (pci_setup_device(dev) < 0) {
		kfree(dev);
		return NULL;
	}

	return dev;
}
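
/*
 * Illustrative note (added commentary, not from the original source): the
 * 0xffff0001 value tested in the retry loop above is how a Configuration
 * Request Retry Status completion shows up once pci_enable_crs() has enabled
 * CRS Software Visibility on the root port -- the Vendor ID read returns
 * 0x0001 with the Device ID bits all set.  The probe then backs off
 * exponentially (1ms, 2ms, 4ms, ...) and retries for roughly a minute before
 * declaring the device stuck.
 */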

void __devinit pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;
	pci_dev_get(dev);

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	INIT_LIST_HEAD(&dev->global_list);
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);
}

struct pci_dev * __devinit
pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);
	pci_scan_msi_device(dev);

	return dev;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will have an empty dev->global_list head.
 */
int __devinit pci_scan_slot(struct pci_bus *bus, int devfn)
{
	int func, nr = 0;
	int scan_all_fns;

	scan_all_fns = pcibios_scan_all_fns(bus, devfn);

	for (func = 0; func < 8; func++, devfn++) {
		struct pci_dev *dev;

		dev = pci_scan_single_device(bus, devfn);
		if (dev) {
			nr++;

			/*
			 * If this is a single function device,
			 * don't scan past the first function.
			 */
			if (!dev->multifunction) {
				if (func > 0) {
					dev->multifunction = 1;
				} else {
					break;
				}
			}
		} else {
			if (func == 0 && !scan_all_fns)
				break;
		}
	}
	return nr;
}

unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->secondary;
	struct pci_dev *dev;

	pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
	pcibios_fixup_bus(bus);
	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
		pci_domain_nr(bus), bus->number, max);
	return max;
}

unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);

	/*
	 * Make the discovered devices available.
	 */
	pci_bus_add_devices(bus);

	return max;
}

struct pci_bus * __devinit pci_create_bus(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	int error;
	struct pci_bus *b;
	struct device *dev;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		kfree(b);
		return NULL;
	}

	b->sysdata = sysdata;
	b->ops = ops;

	if (pci_find_bus(pci_domain_nr(b), bus)) {
		/* If we already got to this bus through a different bridge, ignore it */
		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
		goto err_out;
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	memset(dev, 0, sizeof(*dev));
	dev->parent = parent;
	dev->release = pci_release_bus_bridge_dev;
	sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(dev);
	if (error)
		goto dev_reg_err;
	b->bridge = get_device(dev);

	b->class_dev.class = &pcibus_class;
	sprintf(b->class_dev.class_id, "%04x:%02x", pci_domain_nr(b), bus);
	error = class_device_register(&b->class_dev);
	if (error)
		goto class_dev_reg_err;
	error = class_device_create_file(&b->class_dev, &class_device_attr_cpuaffinity);
	if (error)
		goto class_dev_create_file_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	error = sysfs_create_link(&b->class_dev.kobj, &b->bridge->kobj, "bridge");
	if (error)
		goto sys_create_link_err;

	b->number = b->secondary = bus;
	b->resource[0] = &ioport_resource;
	b->resource[1] = &iomem_resource;

	return b;

sys_create_link_err:
	class_device_remove_file(&b->class_dev, &class_device_attr_cpuaffinity);
class_dev_create_file_err:
	class_device_unregister(&b->class_dev);
class_dev_reg_err:
	device_unregister(dev);
dev_reg_err:
	down_write(&pci_bus_sem);
	list_del(&b->node);
	up_write(&pci_bus_sem);
err_out:
	kfree(dev);
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_bus);

struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	struct pci_bus *b;

	b = pci_create_bus(parent, bus, ops, sysdata);
	if (b)
		b->subordinate = pci_scan_child_bus(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_do_scan_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL(pci_scan_single_device);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#endif

static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b)
{
	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

/*
 * Yes, this forcibly breaks the klist abstraction temporarily.  It
 * just wants to sort the klist, not change reference counts and
 * take/drop locks rapidly in the process.  It does all this while
 * holding the lock for the list, so objects can't otherwise be
 * added/removed while we're swizzling.
 */
static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
{
	struct list_head *pos;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *b;

	list_for_each(pos, list) {
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		b = to_pci_dev(dev);
		if (pci_sort_bf_cmp(a, b) <= 0) {
			list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
			return;
		}
	}
	list_move_tail(&a->dev.knode_bus.n_node, list);
}

static void __init pci_sort_breadthfirst_klist(void)
{
	LIST_HEAD(sorted_devices);
	struct list_head *pos, *tmp;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *pdev;

	spin_lock(&pci_bus_type.klist_devices.k_lock);
	list_for_each_safe(pos, tmp, &pci_bus_type.klist_devices.k_list) {
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		pdev = to_pci_dev(dev);
		pci_insertion_sort_klist(pdev, &sorted_devices);
	}
	list_splice(&sorted_devices, &pci_bus_type.klist_devices.k_list);
	spin_unlock(&pci_bus_type.klist_devices.k_lock);
}

static void __init pci_insertion_sort_devices(struct pci_dev *a, struct list_head *list)
{
	struct pci_dev *b;

	list_for_each_entry(b, list, global_list) {
		if (pci_sort_bf_cmp(a, b) <= 0) {
			list_move_tail(&a->global_list, &b->global_list);
			return;
		}
	}
	list_move_tail(&a->global_list, list);
}

static void __init pci_sort_breadthfirst_devices(void)
{
	LIST_HEAD(sorted_devices);
	struct pci_dev *dev, *tmp;

	down_write(&pci_bus_sem);
	list_for_each_entry_safe(dev, tmp, &pci_devices, global_list) {
		pci_insertion_sort_devices(dev, &sorted_devices);
	}
	list_splice(&sorted_devices, &pci_devices);
	up_write(&pci_bus_sem);
}

void __init pci_sort_breadthfirst(void)
{
	pci_sort_breadthfirst_devices();
	pci_sort_breadthfirst_klist();
}