/*
 * drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>

#include "pci.h"

static void pbus_assign_resources_sorted(const struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct resource *res;
	struct resource_list head, *list, *tmp;
	int idx;

	head.next = NULL;
	list_for_each_entry(dev, &bus->devices, bus_list) {
		u16 class = dev->class >> 8;

		/* Don't touch classless devices or host bridges or ioapics. */
		if (class == PCI_CLASS_NOT_DEFINED ||
		    class == PCI_CLASS_BRIDGE_HOST)
			continue;

		/* Don't touch ioapic devices already enabled by firmware */
		if (class == PCI_CLASS_SYSTEM_PIC) {
			u16 command;
			pci_read_config_word(dev, PCI_COMMAND, &command);
			if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
				continue;
		}

		pdev_sort_resources(dev, &head);
	}

	for (list = head.next; list;) {
		res = list->res;
		idx = res - &list->dev->resource[0];
		if (pci_assign_resource(list->dev, idx)) {
			res->start = 0;
			res->end = 0;
			res->flags = 0;
		}
		tmp = list;
		list = list->next;
		kfree(tmp);
	}
}
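
/*
 * Note on ordering (assumed behaviour): pdev_sort_resources() is
 * expected to queue the resources ordered by decreasing alignment, so
 * the loop above places the largest, most tightly constrained BARs
 * first.  If pci_assign_resource() fails, the resource is cleared
 * (start, end and flags zeroed) rather than left holding a stale range.
 */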

void pci_setup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct pci_bus_region region;

	dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
	if (bus->resource[0]->flags & IORESOURCE_IO) {
		/*
		 * The IO resource is allocated a range twice as large as it
		 * would normally need.  This allows us to set both IO regs.
		 */
		dev_info(&bridge->dev, "  IO window: %#08lx-%#08lx\n",
			 (unsigned long)region.start,
			 (unsigned long)region.end);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
					region.end);
	}

	pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
	if (bus->resource[1]->flags & IORESOURCE_IO) {
		dev_info(&bridge->dev, "  IO window: %#08lx-%#08lx\n",
			 (unsigned long)region.start,
			 (unsigned long)region.end);
		pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
					region.end);
	}

	pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
	if (bus->resource[2]->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  PREFETCH window: %#08lx-%#08lx\n",
			 (unsigned long)region.start,
			 (unsigned long)region.end);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
					region.end);
	}

	pcibios_resource_to_bus(bridge, &region, bus->resource[3]);
	if (bus->resource[3]->flags & IORESOURCE_MEM) {
		dev_info(&bridge->dev, "  MEM window: %#08lx-%#08lx\n",
			 (unsigned long)region.start,
			 (unsigned long)region.end);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
					region.start);
		pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
					region.end);
	}
}
EXPORT_SYMBOL(pci_setup_cardbus);

/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there are no I/O ports or memory behind the
   bridge, the corresponding range must be turned off by writing a
   base value greater than the limit to the bridge's base/limit
   registers.

   Note: care must be taken when updating I/O base/limit registers of
   bridges which support 32-bit I/O.  This update requires two config
   space writes, so it's quite possible that an I/O window of the
   bridge will have some undesirable address (e.g. 0) after the first
   write.  Ditto 64-bit prefetchable MMIO. */
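
/*
 * Worked example (hypothetical values) of the packed encodings used
 * below: for an I/O window 0x1000-0x1fff, the low half of the dword
 * written to PCI_IO_BASE is built as (0x1000 >> 8) & 0x00f0 = 0x10 for
 * the base byte and 0x1fff & 0xf000 = 0x1000 for the limit byte, i.e.
 * base and limit each keep only address bits 15:12; the bridge assumes
 * 0x000 and 0xfff for the low 12 bits respectively.  (The upper half of
 * that dword is the secondary status register, which the read-modify-
 * write of PCI_IO_BASE preserves.)  io_upper16 then carries the upper
 * 16 bits of limit and base, here (0x1fff & 0xffff0000) |
 * (0x1000 >> 16) = 0.  Likewise, a MEM window 0xe0000000-0xe00fffff is
 * packed as ((0xe0000000 >> 16) & 0xfff0) | (0xe00fffff & 0xfff00000)
 * = 0xe000e000, keeping bits 31:20 of base and limit.
 */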
static void pci_setup_bridge(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct pci_bus_region region;
	u32 l, bu, lu, io_upper16;
	int pref_mem64;

	if (pci_is_enabled(bridge))
		return;

	dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Set up the top and bottom of the PCI I/O segment for this bus. */
	pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
	if (bus->resource[0]->flags & IORESOURCE_IO) {
		pci_read_config_dword(bridge, PCI_IO_BASE, &l);
		l &= 0xffff0000;
		l |= (region.start >> 8) & 0x00f0;
		l |= region.end & 0xf000;
		/* Set up upper 16 bits of I/O base/limit. */
		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
		dev_info(&bridge->dev, "  IO window: %#04lx-%#04lx\n",
			 (unsigned long)region.start,
			 (unsigned long)region.end);
	} else {
		/* Clear upper 16 bits of I/O base/limit. */
		io_upper16 = 0;
		l = 0x00f0;
		dev_info(&bridge->dev, "  IO window: disabled\n");
	}
	/* Temporarily disable the I/O range before updating PCI_IO_BASE. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
	/* Update lower 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE, l);
	/* Update upper 16 bits of I/O base/limit. */
	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);

	/* Set up the top and bottom of the PCI Memory segment
	   for this bus. */
	pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
	if (bus->resource[1]->flags & IORESOURCE_MEM) {
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		dev_info(&bridge->dev, "  MEM window: %#08lx-%#08lx\n",
			 (unsigned long)region.start,
			 (unsigned long)region.end);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  MEM window: disabled\n");
	}
	pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);

	/* Clear out the upper 32 bits of PREF limit.
	   If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
	   disables the PREF range, which is ok. */
	pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);

	/* Set up PREF base/limit. */
	pref_mem64 = 0;
	bu = lu = 0;
	pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
	if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
		int width = 8;
		l = (region.start >> 16) & 0xfff0;
		l |= region.end & 0xfff00000;
		if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
			pref_mem64 = 1;
			bu = upper_32_bits(region.start);
			lu = upper_32_bits(region.end);
			width = 16;
		}
		dev_info(&bridge->dev, "  PREFETCH window: %#0*llx-%#0*llx\n",
			 width, (unsigned long long)region.start,
			 width, (unsigned long long)region.end);
	} else {
		l = 0x0000fff0;
		dev_info(&bridge->dev, "  PREFETCH window: disabled\n");
	}
	pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);

	if (pref_mem64) {
		/* Set the upper 32 bits of PREF base & limit. */
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
		pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
	}

	pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}

/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges.  If not, the respective
   base/limit registers must be read-only and read as 0. */
static void pci_bridge_check_ranges(struct pci_bus *bus)
{
	u16 io;
	u32 pmem;
	struct pci_dev *bridge = bus->self;
	struct resource *b_res;

	b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	b_res[1].flags |= IORESOURCE_MEM;

	pci_read_config_word(bridge, PCI_IO_BASE, &io);
	if (!io) {
		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
		pci_read_config_word(bridge, PCI_IO_BASE, &io);
		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
	}
	if (io)
		b_res[0].flags |= IORESOURCE_IO;
	/* DECchip 21050 pass 2 errata: the bridge may miss an address
	   disconnect boundary by one PCI data phase.
	   Workaround: do not use prefetching on this device. */
	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
		return;
	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
	if (!pmem) {
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
				       0xfff0fff0);
		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
	}
	if (pmem) {
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
			b_res[2].flags |= IORESOURCE_MEM_64;
	}

	/* Double-check that the bridge really supports 64-bit prefetch. */
	if (b_res[2].flags & IORESOURCE_MEM_64) {
		u32 mem_base_hi, tmp;
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				      &mem_base_hi);
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       0xffffffff);
		pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
		if (!tmp)
			b_res[2].flags &= ~IORESOURCE_MEM_64;
		pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
				       mem_base_hi);
	}
}
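
/*
 * The probes above use the usual "write a pattern, read it back" idiom.
 * A bridge that implements the optional I/O window has writable
 * base/limit fields, so after writing 0xf0f0 the register reads back
 * non-zero; a bridge without one has the register hard-wired to zero,
 * IORESOURCE_IO stays clear and the window is never sized or
 * programmed.  The probe only runs when the register currently reads
 * zero, so values already programmed by firmware are preserved.  The
 * same trick on PCI_PREF_BASE_UPPER32 distinguishes 64-bit capable
 * prefetchable windows from 32-bit ones.
 */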

/* Helper function for sizing routines: find first available
   bus resource of a given type.  Note: we intentionally skip
   the bus resources which have already been assigned (that is,
   have non-NULL parent resource). */
static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
{
	int i;
	struct resource *r;
	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
				  IORESOURCE_PREFETCH;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		r = bus->resource[i];
		if (r == &ioport_resource || r == &iomem_resource)
			continue;
		if (r && (r->flags & type_mask) == type && !r->parent)
			return r;
	}
	return NULL;
}

/* Sizing the IO windows of the PCI-PCI bridge is trivial,
   since these windows have 4K granularity and the IO ranges
   of non-bridge PCI devices are limited to 256 bytes.
   We must be careful with the ISA aliasing though. */
static void pbus_size_io(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
	unsigned long size = 0, size1 = 0;

	if (!b_res)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			unsigned long r_size;

			if (r->parent || !(r->flags & IORESOURCE_IO))
				continue;
			r_size = resource_size(r);

			if (r_size < 0x400)
				/* Might be re-aligned for ISA */
				size += r_size;
			else
				size1 += r_size;
		}
	}
	/* To be fixed in 2.5: we should have sort of HAVE_ISA
	   flag in the struct pci_bus. */
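	/*
	 * The CONFIG_ISA/CONFIG_EISA adjustment below allows for legacy
	 * ISA aliasing: ISA cards decode only the low 10 address bits,
	 * so I/O ports 0x100-0x3ff shadow every 1K block.  Multiplying
	 * the non-remainder part of the small-range total by 4 leaves
	 * room to place each small (< 0x400) range in the low 256 bytes
	 * of its own 1K chunk.  For example (hypothetical total),
	 * size = 0x380 becomes (0x380 & 0xff) + ((0x380 & ~0xff) << 2)
	 * = 0x80 + 0xc00 = 0xc80 before the final 4K rounding.
	 */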
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
	size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
	size = ALIGN(size + size1, 4096);
	if (!size) {
		b_res->flags = 0;
		return;
	}
	/* Alignment of the IO window is always 4K */
	b_res->start = 4096;
	b_res->end = b_res->start + size - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
}

/* Calculate the size of the bus and minimal alignment which
   guarantees that all child resources fit in this size. */
static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type)
{
	struct pci_dev *dev;
	resource_size_t min_align, align, size;
	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
	int order, max_order;
	struct resource *b_res = find_free_bus_resource(bus, type);
	unsigned int mem64_mask = 0;

	if (!b_res)
		return 0;

	memset(aligns, 0, sizeof(aligns));
	max_order = 0;
	size = 0;

	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
	b_res->flags &= ~IORESOURCE_MEM_64;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];
			resource_size_t r_size;

			if (r->parent || (r->flags & mask) != type)
				continue;
			r_size = resource_size(r);
			/* For bridges, size != alignment */
			align = resource_alignment(r);
			order = __ffs(align) - 20;
			if (order > 11) {
				dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
					 "%pR\n", i, (unsigned long long)align, r);
				r->flags = 0;
				continue;
			}
			size += r_size;
			if (order < 0)
				order = 0;
			/* Exclude ranges with size > align from
			   calculation of the alignment. */
			if (r_size == align)
				aligns[order] += align;
			if (order > max_order)
				max_order = order;
			mem64_mask &= r->flags & IORESOURCE_MEM_64;
		}
	}

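	/*
	 * Derive the bridge window alignment from the aligns[] totals
	 * gathered above.  aligns[order] is the combined size of the
	 * resources whose size equals their alignment of
	 * 1 << (order + 20), i.e. order 0 = 1MB, order 1 = 2MB, ...,
	 * order 11 = 2GB.  For example (hypothetical), a bus whose only
	 * naturally-aligned BAR is a 4MB one ends up with
	 * aligns[2] = 4MB and, with nothing smaller present,
	 * min_align = 4MB.  When smaller BARs are mixed in, the
	 * heuristic below may settle on less than the largest
	 * alignment (the align1 >> 1 case).
	 */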
	align = 0;
	min_align = 0;
	for (order = 0; order <= max_order; order++) {
		resource_size_t align1 = 1;

		align1 <<= (order + 20);

		if (!align)
			min_align = align1;
		else if (ALIGN(align + min_align, min_align) < align1)
			min_align = align1 >> 1;
		align += aligns[order];
	}
	size = ALIGN(size, min_align);
	if (!size) {
		b_res->flags = 0;
		return 1;
	}
	b_res->start = min_align;
	b_res->end = size + min_align - 1;
	b_res->flags |= IORESOURCE_STARTALIGN;
	b_res->flags |= mem64_mask;
	return 1;
}

static void pci_bus_size_cardbus(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
	u16 ctrl;

	/*
	 * Reserve some resources for CardBus.  We reserve
	 * a fixed amount of bus space for CardBus bridges.
	 */
	b_res[0].start = 0;
	b_res[0].end = pci_cardbus_io_size - 1;
	b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	b_res[1].start = 0;
	b_res[1].end = pci_cardbus_io_size - 1;
	b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;

	/*
	 * Check whether prefetchable memory is supported
	 * by this bridge.
	 */
	pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
		ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
		pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
		pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
	}

	/*
	 * If we have prefetchable memory support, allocate
	 * two regions.  Otherwise, allocate one region of
	 * twice the size.
	 */
	if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
		b_res[2].start = 0;
		b_res[2].end = pci_cardbus_mem_size - 1;
		b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
				  IORESOURCE_SIZEALIGN;

		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	} else {
		b_res[3].start = 0;
		b_res[3].end = pci_cardbus_mem_size * 2 - 1;
		b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
	}
}
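
/*
 * pci_cardbus_io_size and pci_cardbus_mem_size used above are defined
 * in drivers/pci/pci.c; in kernels of this vintage they are believed to
 * default to 256 bytes of I/O and 64MB of memory per window, and can be
 * overridden with the "pci=cbiosize=" and "pci=cbmemsize=" boot
 * parameters.
 */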

void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
	struct pci_dev *dev;
	unsigned long mask, prefmask;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_bus *b = dev->subordinate;
		if (!b)
			continue;

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_bus_size_cardbus(b);
			break;

		case PCI_CLASS_BRIDGE_PCI:
		default:
			pci_bus_size_bridges(b);
			break;
		}
	}

	/* The root bus? */
	if (!bus->self)
		return;

	switch (bus->self->class >> 8) {
	case PCI_CLASS_BRIDGE_CARDBUS:
		/* don't size cardbuses yet. */
		break;

	case PCI_CLASS_BRIDGE_PCI:
		pci_bridge_check_ranges(bus);
		/* fall through */
	default:
		pbus_size_io(bus);
		/* If the bridge supports prefetchable range, size it
		   separately.  If it doesn't, or its prefetchable window
		   has already been allocated by arch code, try
		   non-prefetchable range for both types of PCI memory
		   resources. */
		mask = IORESOURCE_MEM;
		prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (pbus_size_mem(bus, prefmask, prefmask))
			mask = prefmask; /* Success, size non-prefetch only. */
		pbus_size_mem(bus, mask, IORESOURCE_MEM);
		break;
	}
}
EXPORT_SYMBOL(pci_bus_size_bridges);

void __ref pci_bus_assign_resources(const struct pci_bus *bus)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pbus_assign_resources_sorted(bus);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		pci_bus_assign_resources(b);

		switch (dev->class >> 8) {
		case PCI_CLASS_BRIDGE_PCI:
			pci_setup_bridge(b);
			break;

		case PCI_CLASS_BRIDGE_CARDBUS:
			pci_setup_cardbus(b);
			break;

		default:
			dev_info(&dev->dev, "not setting up bridge for bus "
				 "%04x:%02x\n", pci_domain_nr(b), b->number);
			break;
		}
	}
}
EXPORT_SYMBOL(pci_bus_assign_resources);

static void pci_bus_dump_res(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
		struct resource *res = bus->resource[i];
		if (!res || !res->end)
			continue;

		dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i,
			   (res->flags & IORESOURCE_IO) ? "io: " :
			   ((res->flags & IORESOURCE_PREFETCH) ? "pref mem" : "mem:"),
			   res);
	}
}

static void pci_bus_dump_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	struct pci_dev *dev;

	pci_bus_dump_res(bus);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		b = dev->subordinate;
		if (!b)
			continue;

		pci_bus_dump_resources(b);
	}
}

void __init
pci_assign_unassigned_resources(void)
{
	struct pci_bus *bus;

	/* Depth first, calculate sizes and alignments of all
	   subordinate buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_size_bridges(bus);
	}
	/* Depth last, allocate resources and update the hardware. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_assign_resources(bus);
		pci_enable_bridges(bus);
	}

	/* Dump the resources on the buses. */
	list_for_each_entry(bus, &pci_root_buses, node) {
		pci_bus_dump_resources(bus);
	}
}