1 /* 2 * Port for PPC64 David Engebretsen, IBM Corp. 3 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. 4 * 5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM 6 * Rework, based on alpha PCI code. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 11 * 2 of the License, or (at your option) any later version. 12 */ 13 14 #undef DEBUG 15 16 #include <linux/kernel.h> 17 #include <linux/pci.h> 18 #include <linux/string.h> 19 #include <linux/init.h> 20 #include <linux/bootmem.h> 21 #include <linux/mm.h> 22 #include <linux/list.h> 23 #include <linux/syscalls.h> 24 #include <linux/irq.h> 25 #include <linux/vmalloc.h> 26 27 #include <asm/processor.h> 28 #include <asm/io.h> 29 #include <asm/prom.h> 30 #include <asm/pci-bridge.h> 31 #include <asm/byteorder.h> 32 #include <asm/machdep.h> 33 #include <asm/ppc-pci.h> 34 #include <asm/firmware.h> 35 36 #ifdef DEBUG 37 #include <asm/udbg.h> 38 #define DBG(fmt...) printk(fmt) 39 #else 40 #define DBG(fmt...) 41 #endif 42 43 unsigned long pci_probe_only = 1; 44 int pci_assign_all_buses = 0; 45 46 static void fixup_resource(struct resource *res, struct pci_dev *dev); 47 static void do_bus_setup(struct pci_bus *bus); 48 49 /* pci_io_base -- the base address from which io bars are offsets. 50 * This is the lowest I/O base address (so bar values are always positive), 51 * and it *must* be the start of ISA space if an ISA bus exists because 52 * ISA drivers use hard coded offsets. If no ISA bus exists nothing 53 * is mapped on the first 64K of IO space 54 */ 55 unsigned long pci_io_base = ISA_IO_BASE; 56 EXPORT_SYMBOL(pci_io_base); 57 58 LIST_HEAD(hose_list); 59 60 static struct dma_mapping_ops *pci_dma_ops; 61 62 /* XXX kill that some day ... */ 63 int global_phb_number; /* Global phb counter */ 64 65 void set_pci_dma_ops(struct dma_mapping_ops *dma_ops) 66 { 67 pci_dma_ops = dma_ops; 68 } 69 70 struct dma_mapping_ops *get_pci_dma_ops(void) 71 { 72 return pci_dma_ops; 73 } 74 EXPORT_SYMBOL(get_pci_dma_ops); 75 76 static void fixup_broken_pcnet32(struct pci_dev* dev) 77 { 78 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { 79 dev->vendor = PCI_VENDOR_ID_AMD; 80 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); 81 } 82 } 83 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); 84 85 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 86 struct resource *res) 87 { 88 unsigned long offset = 0; 89 struct pci_controller *hose = pci_bus_to_host(dev->bus); 90 91 if (!hose) 92 return; 93 94 if (res->flags & IORESOURCE_IO) 95 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 96 97 if (res->flags & IORESOURCE_MEM) 98 offset = hose->pci_mem_offset; 99 100 region->start = res->start - offset; 101 region->end = res->end - offset; 102 } 103 104 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, 105 struct pci_bus_region *region) 106 { 107 unsigned long offset = 0; 108 struct pci_controller *hose = pci_bus_to_host(dev->bus); 109 110 if (!hose) 111 return; 112 113 if (res->flags & IORESOURCE_IO) 114 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 115 116 if (res->flags & IORESOURCE_MEM) 117 offset = hose->pci_mem_offset; 118 119 res->start = region->start + offset; 120 res->end = region->end + offset; 121 } 122 123 #ifdef CONFIG_HOTPLUG 124 EXPORT_SYMBOL(pcibios_resource_to_bus); 125 EXPORT_SYMBOL(pcibios_bus_to_resource); 126 #endif 127 128 /* 129 * We need to avoid collisions with `mirrored' VGA ports 130 * and other strange ISA hardware, so we always want the 131 * addresses to be allocated in the 0x000-0x0ff region 132 * modulo 0x400. 133 * 134 * Why? Because some silly external IO cards only decode 135 * the low 10 bits of the IO address. The 0x00-0xff region 136 * is reserved for motherboard devices that decode all 16 137 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, 138 * but we want to try to avoid allocating at 0x2900-0x2bff 139 * which might have be mirrored at 0x0100-0x03ff.. 140 */ 141 void pcibios_align_resource(void *data, struct resource *res, 142 resource_size_t size, resource_size_t align) 143 { 144 struct pci_dev *dev = data; 145 struct pci_controller *hose = pci_bus_to_host(dev->bus); 146 resource_size_t start = res->start; 147 unsigned long alignto; 148 149 if (res->flags & IORESOURCE_IO) { 150 unsigned long offset = (unsigned long)hose->io_base_virt - 151 _IO_BASE; 152 /* Make sure we start at our min on all hoses */ 153 if (start - offset < PCIBIOS_MIN_IO) 154 start = PCIBIOS_MIN_IO + offset; 155 156 /* 157 * Put everything into 0x00-0xff region modulo 0x400 158 */ 159 if (start & 0x300) 160 start = (start + 0x3ff) & ~0x3ff; 161 162 } else if (res->flags & IORESOURCE_MEM) { 163 /* Make sure we start at our min on all hoses */ 164 if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM) 165 start = PCIBIOS_MIN_MEM + hose->pci_mem_offset; 166 167 /* Align to multiple of size of minimum base. */ 168 alignto = max(0x1000UL, align); 169 start = ALIGN(start, alignto); 170 } 171 172 res->start = start; 173 } 174 175 static DEFINE_SPINLOCK(hose_spinlock); 176 177 /* 178 * pci_controller(phb) initialized common variables. 179 */ 180 static void __devinit pci_setup_pci_controller(struct pci_controller *hose) 181 { 182 memset(hose, 0, sizeof(struct pci_controller)); 183 184 spin_lock(&hose_spinlock); 185 hose->global_number = global_phb_number++; 186 list_add_tail(&hose->list_node, &hose_list); 187 spin_unlock(&hose_spinlock); 188 } 189 190 struct pci_controller * pcibios_alloc_controller(struct device_node *dev) 191 { 192 struct pci_controller *phb; 193 194 if (mem_init_done) 195 phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL); 196 else 197 phb = alloc_bootmem(sizeof (struct pci_controller)); 198 if (phb == NULL) 199 return NULL; 200 pci_setup_pci_controller(phb); 201 phb->arch_data = dev; 202 phb->is_dynamic = mem_init_done; 203 if (dev) { 204 int nid = of_node_to_nid(dev); 205 206 if (nid < 0 || !node_online(nid)) 207 nid = -1; 208 209 PHB_SET_NODE(phb, nid); 210 } 211 return phb; 212 } 213 214 void pcibios_free_controller(struct pci_controller *phb) 215 { 216 spin_lock(&hose_spinlock); 217 list_del(&phb->list_node); 218 spin_unlock(&hose_spinlock); 219 220 if (phb->is_dynamic) 221 kfree(phb); 222 } 223 224 void __devinit pcibios_claim_one_bus(struct pci_bus *b) 225 { 226 struct pci_dev *dev; 227 struct pci_bus *child_bus; 228 229 list_for_each_entry(dev, &b->devices, bus_list) { 230 int i; 231 232 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 233 struct resource *r = &dev->resource[i]; 234 235 if (r->parent || !r->start || !r->flags) 236 continue; 237 pci_claim_resource(dev, i); 238 } 239 } 240 241 list_for_each_entry(child_bus, &b->children, node) 242 pcibios_claim_one_bus(child_bus); 243 } 244 #ifdef CONFIG_HOTPLUG 245 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); 246 #endif 247 248 static void __init pcibios_claim_of_setup(void) 249 { 250 struct pci_bus *b; 251 252 if (firmware_has_feature(FW_FEATURE_ISERIES)) 253 return; 254 255 list_for_each_entry(b, &pci_root_buses, node) 256 pcibios_claim_one_bus(b); 257 } 258 259 static u32 get_int_prop(struct device_node *np, const char *name, u32 def) 260 { 261 const u32 *prop; 262 int len; 263 264 prop = of_get_property(np, name, &len); 265 if (prop && len >= 4) 266 return *prop; 267 return def; 268 } 269 270 static unsigned int pci_parse_of_flags(u32 addr0) 271 { 272 unsigned int flags = 0; 273 274 if (addr0 & 0x02000000) { 275 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; 276 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; 277 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; 278 if (addr0 & 0x40000000) 279 flags |= IORESOURCE_PREFETCH 280 | PCI_BASE_ADDRESS_MEM_PREFETCH; 281 } else if (addr0 & 0x01000000) 282 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; 283 return flags; 284 } 285 286 #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) 287 288 static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) 289 { 290 u64 base, size; 291 unsigned int flags; 292 struct resource *res; 293 const u32 *addrs; 294 u32 i; 295 int proplen; 296 297 addrs = of_get_property(node, "assigned-addresses", &proplen); 298 if (!addrs) 299 return; 300 DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); 301 for (; proplen >= 20; proplen -= 20, addrs += 5) { 302 flags = pci_parse_of_flags(addrs[0]); 303 if (!flags) 304 continue; 305 base = GET_64BIT(addrs, 1); 306 size = GET_64BIT(addrs, 3); 307 if (!size) 308 continue; 309 i = addrs[0] & 0xff; 310 DBG(" base: %llx, size: %llx, i: %x\n", 311 (unsigned long long)base, (unsigned long long)size, i); 312 313 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 314 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 315 } else if (i == dev->rom_base_reg) { 316 res = &dev->resource[PCI_ROM_RESOURCE]; 317 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; 318 } else { 319 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); 320 continue; 321 } 322 res->start = base; 323 res->end = base + size - 1; 324 res->flags = flags; 325 res->name = pci_name(dev); 326 fixup_resource(res, dev); 327 } 328 } 329 330 struct pci_dev *of_create_pci_dev(struct device_node *node, 331 struct pci_bus *bus, int devfn) 332 { 333 struct pci_dev *dev; 334 const char *type; 335 336 dev = alloc_pci_dev(); 337 if (!dev) 338 return NULL; 339 type = of_get_property(node, "device_type", NULL); 340 if (type == NULL) 341 type = ""; 342 343 DBG(" create device, devfn: %x, type: %s\n", devfn, type); 344 345 dev->bus = bus; 346 dev->sysdata = node; 347 dev->dev.parent = bus->bridge; 348 dev->dev.bus = &pci_bus_type; 349 dev->devfn = devfn; 350 dev->multifunction = 0; /* maybe a lie? */ 351 352 dev->vendor = get_int_prop(node, "vendor-id", 0xffff); 353 dev->device = get_int_prop(node, "device-id", 0xffff); 354 dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0); 355 dev->subsystem_device = get_int_prop(node, "subsystem-id", 0); 356 357 dev->cfg_size = pci_cfg_space_size(dev); 358 359 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), 360 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 361 dev->class = get_int_prop(node, "class-code", 0); 362 363 DBG(" class: 0x%x\n", dev->class); 364 365 dev->current_state = 4; /* unknown power state */ 366 dev->error_state = pci_channel_io_normal; 367 368 if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { 369 /* a PCI-PCI bridge */ 370 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; 371 dev->rom_base_reg = PCI_ROM_ADDRESS1; 372 } else if (!strcmp(type, "cardbus")) { 373 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; 374 } else { 375 dev->hdr_type = PCI_HEADER_TYPE_NORMAL; 376 dev->rom_base_reg = PCI_ROM_ADDRESS; 377 /* Maybe do a default OF mapping here */ 378 dev->irq = NO_IRQ; 379 } 380 381 pci_parse_of_addrs(node, dev); 382 383 DBG(" adding to system ...\n"); 384 385 pci_device_add(dev, bus); 386 387 return dev; 388 } 389 EXPORT_SYMBOL(of_create_pci_dev); 390 391 void __devinit of_scan_bus(struct device_node *node, 392 struct pci_bus *bus) 393 { 394 struct device_node *child = NULL; 395 const u32 *reg; 396 int reglen, devfn; 397 struct pci_dev *dev; 398 399 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); 400 401 while ((child = of_get_next_child(node, child)) != NULL) { 402 DBG(" * %s\n", child->full_name); 403 reg = of_get_property(child, "reg", ®len); 404 if (reg == NULL || reglen < 20) 405 continue; 406 devfn = (reg[0] >> 8) & 0xff; 407 408 /* create a new pci_dev for this device */ 409 dev = of_create_pci_dev(child, bus, devfn); 410 if (!dev) 411 continue; 412 DBG("dev header type: %x\n", dev->hdr_type); 413 414 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 415 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 416 of_scan_pci_bridge(child, dev); 417 } 418 419 do_bus_setup(bus); 420 } 421 EXPORT_SYMBOL(of_scan_bus); 422 423 void __devinit of_scan_pci_bridge(struct device_node *node, 424 struct pci_dev *dev) 425 { 426 struct pci_bus *bus; 427 const u32 *busrange, *ranges; 428 int len, i, mode; 429 struct resource *res; 430 unsigned int flags; 431 u64 size; 432 433 DBG("of_scan_pci_bridge(%s)\n", node->full_name); 434 435 /* parse bus-range property */ 436 busrange = of_get_property(node, "bus-range", &len); 437 if (busrange == NULL || len != 8) { 438 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", 439 node->full_name); 440 return; 441 } 442 ranges = of_get_property(node, "ranges", &len); 443 if (ranges == NULL) { 444 printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", 445 node->full_name); 446 return; 447 } 448 449 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 450 if (!bus) { 451 printk(KERN_ERR "Failed to create pci bus for %s\n", 452 node->full_name); 453 return; 454 } 455 456 bus->primary = dev->bus->number; 457 bus->subordinate = busrange[1]; 458 bus->bridge_ctl = 0; 459 bus->sysdata = node; 460 461 /* parse ranges property */ 462 /* PCI #address-cells == 3 and #size-cells == 2 always */ 463 res = &dev->resource[PCI_BRIDGE_RESOURCES]; 464 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { 465 res->flags = 0; 466 bus->resource[i] = res; 467 ++res; 468 } 469 i = 1; 470 for (; len >= 32; len -= 32, ranges += 8) { 471 flags = pci_parse_of_flags(ranges[0]); 472 size = GET_64BIT(ranges, 6); 473 if (flags == 0 || size == 0) 474 continue; 475 if (flags & IORESOURCE_IO) { 476 res = bus->resource[0]; 477 if (res->flags) { 478 printk(KERN_ERR "PCI: ignoring extra I/O range" 479 " for bridge %s\n", node->full_name); 480 continue; 481 } 482 } else { 483 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { 484 printk(KERN_ERR "PCI: too many memory ranges" 485 " for bridge %s\n", node->full_name); 486 continue; 487 } 488 res = bus->resource[i]; 489 ++i; 490 } 491 res->start = GET_64BIT(ranges, 1); 492 res->end = res->start + size - 1; 493 res->flags = flags; 494 fixup_resource(res, dev); 495 } 496 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 497 bus->number); 498 DBG(" bus name: %s\n", bus->name); 499 500 mode = PCI_PROBE_NORMAL; 501 if (ppc_md.pci_probe_mode) 502 mode = ppc_md.pci_probe_mode(bus); 503 DBG(" probe mode: %d\n", mode); 504 505 if (mode == PCI_PROBE_DEVTREE) 506 of_scan_bus(node, bus); 507 else if (mode == PCI_PROBE_NORMAL) 508 pci_scan_child_bus(bus); 509 } 510 EXPORT_SYMBOL(of_scan_pci_bridge); 511 512 void __devinit scan_phb(struct pci_controller *hose) 513 { 514 struct pci_bus *bus; 515 struct device_node *node = hose->arch_data; 516 int i, mode; 517 struct resource *res; 518 519 DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); 520 521 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); 522 if (bus == NULL) { 523 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", 524 hose->global_number); 525 return; 526 } 527 bus->secondary = hose->first_busno; 528 hose->bus = bus; 529 530 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 531 pcibios_map_io_space(bus); 532 533 bus->resource[0] = res = &hose->io_resource; 534 if (res->flags && request_resource(&ioport_resource, res)) { 535 printk(KERN_ERR "Failed to request PCI IO region " 536 "on PCI domain %04x\n", hose->global_number); 537 DBG("res->start = 0x%016lx, res->end = 0x%016lx\n", 538 res->start, res->end); 539 } 540 541 for (i = 0; i < 3; ++i) { 542 res = &hose->mem_resources[i]; 543 bus->resource[i+1] = res; 544 if (res->flags && request_resource(&iomem_resource, res)) 545 printk(KERN_ERR "Failed to request PCI memory region " 546 "on PCI domain %04x\n", hose->global_number); 547 } 548 549 mode = PCI_PROBE_NORMAL; 550 551 if (node && ppc_md.pci_probe_mode) 552 mode = ppc_md.pci_probe_mode(bus); 553 DBG(" probe mode: %d\n", mode); 554 if (mode == PCI_PROBE_DEVTREE) { 555 bus->subordinate = hose->last_busno; 556 of_scan_bus(node, bus); 557 } 558 559 if (mode == PCI_PROBE_NORMAL) 560 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); 561 } 562 563 static int __init pcibios_init(void) 564 { 565 struct pci_controller *hose, *tmp; 566 567 /* For now, override phys_mem_access_prot. If we need it, 568 * later, we may move that initialization to each ppc_md 569 */ 570 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; 571 572 if (firmware_has_feature(FW_FEATURE_ISERIES)) 573 iSeries_pcibios_init(); 574 575 printk(KERN_DEBUG "PCI: Probing PCI hardware\n"); 576 577 /* Scan all of the recorded PCI controllers. */ 578 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 579 scan_phb(hose); 580 pci_bus_add_devices(hose->bus); 581 } 582 583 if (!firmware_has_feature(FW_FEATURE_ISERIES)) { 584 if (pci_probe_only) 585 pcibios_claim_of_setup(); 586 else 587 /* FIXME: `else' will be removed when 588 pci_assign_unassigned_resources() is able to work 589 correctly with [partially] allocated PCI tree. */ 590 pci_assign_unassigned_resources(); 591 } 592 593 /* Call machine dependent final fixup */ 594 if (ppc_md.pcibios_fixup) 595 ppc_md.pcibios_fixup(); 596 597 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); 598 599 return 0; 600 } 601 602 subsys_initcall(pcibios_init); 603 604 char __init *pcibios_setup(char *str) 605 { 606 return str; 607 } 608 609 int pcibios_enable_device(struct pci_dev *dev, int mask) 610 { 611 u16 cmd, oldcmd; 612 int i; 613 614 pci_read_config_word(dev, PCI_COMMAND, &cmd); 615 oldcmd = cmd; 616 617 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 618 struct resource *res = &dev->resource[i]; 619 620 /* Only set up the requested stuff */ 621 if (!(mask & (1<<i))) 622 continue; 623 624 if (res->flags & IORESOURCE_IO) 625 cmd |= PCI_COMMAND_IO; 626 if (res->flags & IORESOURCE_MEM) 627 cmd |= PCI_COMMAND_MEMORY; 628 } 629 630 if (cmd != oldcmd) { 631 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", 632 pci_name(dev), cmd); 633 /* Enable the appropriate bits in the PCI command register. */ 634 pci_write_config_word(dev, PCI_COMMAND, cmd); 635 } 636 return 0; 637 } 638 639 /* 640 * Return the domain number for this bus. 641 */ 642 int pci_domain_nr(struct pci_bus *bus) 643 { 644 if (firmware_has_feature(FW_FEATURE_ISERIES)) 645 return 0; 646 else { 647 struct pci_controller *hose = pci_bus_to_host(bus); 648 649 return hose->global_number; 650 } 651 } 652 653 EXPORT_SYMBOL(pci_domain_nr); 654 655 /* Decide whether to display the domain number in /proc */ 656 int pci_proc_domain(struct pci_bus *bus) 657 { 658 if (firmware_has_feature(FW_FEATURE_ISERIES)) 659 return 0; 660 else { 661 struct pci_controller *hose = pci_bus_to_host(bus); 662 return hose->buid; 663 } 664 } 665 666 /* 667 * Platform support for /proc/bus/pci/X/Y mmap()s, 668 * modelled on the sparc64 implementation by Dave Miller. 669 * -- paulus. 670 */ 671 672 /* 673 * Adjust vm_pgoff of VMA such that it is the physical page offset 674 * corresponding to the 32-bit pci bus offset for DEV requested by the user. 675 * 676 * Basically, the user finds the base address for his device which he wishes 677 * to mmap. They read the 32-bit value from the config space base register, 678 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the 679 * offset parameter of mmap on /proc/bus/pci/XXX for that device. 680 * 681 * Returns negative error code on failure, zero on success. 682 */ 683 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, 684 resource_size_t *offset, 685 enum pci_mmap_state mmap_state) 686 { 687 struct pci_controller *hose = pci_bus_to_host(dev->bus); 688 unsigned long io_offset = 0; 689 int i, res_bit; 690 691 if (hose == 0) 692 return NULL; /* should never happen */ 693 694 /* If memory, add on the PCI bridge address offset */ 695 if (mmap_state == pci_mmap_mem) { 696 #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ 697 *offset += hose->pci_mem_offset; 698 #endif 699 res_bit = IORESOURCE_MEM; 700 } else { 701 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; 702 *offset += io_offset; 703 res_bit = IORESOURCE_IO; 704 } 705 706 /* 707 * Check that the offset requested corresponds to one of the 708 * resources of the device. 709 */ 710 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 711 struct resource *rp = &dev->resource[i]; 712 int flags = rp->flags; 713 714 /* treat ROM as memory (should be already) */ 715 if (i == PCI_ROM_RESOURCE) 716 flags |= IORESOURCE_MEM; 717 718 /* Active and same type? */ 719 if ((flags & res_bit) == 0) 720 continue; 721 722 /* In the range of this resource? */ 723 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) 724 continue; 725 726 /* found it! construct the final physical address */ 727 if (mmap_state == pci_mmap_io) 728 *offset += hose->io_base_phys - io_offset; 729 return rp; 730 } 731 732 return NULL; 733 } 734 735 /* 736 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci 737 * device mapping. 738 */ 739 static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, 740 pgprot_t protection, 741 enum pci_mmap_state mmap_state, 742 int write_combine) 743 { 744 unsigned long prot = pgprot_val(protection); 745 746 /* Write combine is always 0 on non-memory space mappings. On 747 * memory space, if the user didn't pass 1, we check for a 748 * "prefetchable" resource. This is a bit hackish, but we use 749 * this to workaround the inability of /sysfs to provide a write 750 * combine bit 751 */ 752 if (mmap_state != pci_mmap_mem) 753 write_combine = 0; 754 else if (write_combine == 0) { 755 if (rp->flags & IORESOURCE_PREFETCH) 756 write_combine = 1; 757 } 758 759 /* XXX would be nice to have a way to ask for write-through */ 760 prot |= _PAGE_NO_CACHE; 761 if (write_combine) 762 prot &= ~_PAGE_GUARDED; 763 else 764 prot |= _PAGE_GUARDED; 765 766 return __pgprot(prot); 767 } 768 769 /* 770 * This one is used by /dev/mem and fbdev who have no clue about the 771 * PCI device, it tries to find the PCI device first and calls the 772 * above routine 773 */ 774 pgprot_t pci_phys_mem_access_prot(struct file *file, 775 unsigned long pfn, 776 unsigned long size, 777 pgprot_t protection) 778 { 779 struct pci_dev *pdev = NULL; 780 struct resource *found = NULL; 781 unsigned long prot = pgprot_val(protection); 782 unsigned long offset = pfn << PAGE_SHIFT; 783 int i; 784 785 if (page_is_ram(pfn)) 786 return __pgprot(prot); 787 788 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 789 790 for_each_pci_dev(pdev) { 791 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 792 struct resource *rp = &pdev->resource[i]; 793 int flags = rp->flags; 794 795 /* Active and same type? */ 796 if ((flags & IORESOURCE_MEM) == 0) 797 continue; 798 /* In the range of this resource? */ 799 if (offset < (rp->start & PAGE_MASK) || 800 offset > rp->end) 801 continue; 802 found = rp; 803 break; 804 } 805 if (found) 806 break; 807 } 808 if (found) { 809 if (found->flags & IORESOURCE_PREFETCH) 810 prot &= ~_PAGE_GUARDED; 811 pci_dev_put(pdev); 812 } 813 814 DBG("non-PCI map for %lx, prot: %lx\n", offset, prot); 815 816 return __pgprot(prot); 817 } 818 819 820 /* 821 * Perform the actual remap of the pages for a PCI device mapping, as 822 * appropriate for this architecture. The region in the process to map 823 * is described by vm_start and vm_end members of VMA, the base physical 824 * address is found in vm_pgoff. 825 * The pci device structure is provided so that architectures may make mapping 826 * decisions on a per-device or per-bus basis. 827 * 828 * Returns a negative error code on failure, zero on success. 829 */ 830 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 831 enum pci_mmap_state mmap_state, int write_combine) 832 { 833 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT; 834 struct resource *rp; 835 int ret; 836 837 rp = __pci_mmap_make_offset(dev, &offset, mmap_state); 838 if (rp == NULL) 839 return -EINVAL; 840 841 vma->vm_pgoff = offset >> PAGE_SHIFT; 842 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, 843 vma->vm_page_prot, 844 mmap_state, write_combine); 845 846 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 847 vma->vm_end - vma->vm_start, vma->vm_page_prot); 848 849 return ret; 850 } 851 852 static ssize_t pci_show_devspec(struct device *dev, 853 struct device_attribute *attr, char *buf) 854 { 855 struct pci_dev *pdev; 856 struct device_node *np; 857 858 pdev = to_pci_dev (dev); 859 np = pci_device_to_OF_node(pdev); 860 if (np == NULL || np->full_name == NULL) 861 return 0; 862 return sprintf(buf, "%s", np->full_name); 863 } 864 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); 865 866 void pcibios_add_platform_entries(struct pci_dev *pdev) 867 { 868 device_create_file(&pdev->dev, &dev_attr_devspec); 869 } 870 871 void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 872 struct device_node *dev, int prim) 873 { 874 const unsigned int *ranges; 875 unsigned int pci_space; 876 unsigned long size; 877 int rlen = 0; 878 int memno = 0; 879 struct resource *res; 880 int np, na = of_n_addr_cells(dev); 881 unsigned long pci_addr, cpu_phys_addr; 882 883 np = na + 5; 884 885 /* From "PCI Binding to 1275" 886 * The ranges property is laid out as an array of elements, 887 * each of which comprises: 888 * cells 0 - 2: a PCI address 889 * cells 3 or 3+4: a CPU physical address 890 * (size depending on dev->n_addr_cells) 891 * cells 4+5 or 5+6: the size of the range 892 */ 893 ranges = of_get_property(dev, "ranges", &rlen); 894 if (ranges == NULL) 895 return; 896 hose->io_base_phys = 0; 897 while ((rlen -= np * sizeof(unsigned int)) >= 0) { 898 res = NULL; 899 pci_space = ranges[0]; 900 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2]; 901 cpu_phys_addr = of_translate_address(dev, &ranges[3]); 902 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4]; 903 ranges += np; 904 if (size == 0) 905 continue; 906 907 /* Now consume following elements while they are contiguous */ 908 while (rlen >= np * sizeof(unsigned int)) { 909 unsigned long addr, phys; 910 911 if (ranges[0] != pci_space) 912 break; 913 addr = ((unsigned long)ranges[1] << 32) | ranges[2]; 914 phys = ranges[3]; 915 if (na >= 2) 916 phys = (phys << 32) | ranges[4]; 917 if (addr != pci_addr + size || 918 phys != cpu_phys_addr + size) 919 break; 920 921 size += ((unsigned long)ranges[na+3] << 32) 922 | ranges[na+4]; 923 ranges += np; 924 rlen -= np * sizeof(unsigned int); 925 } 926 927 switch ((pci_space >> 24) & 0x3) { 928 case 1: /* I/O space */ 929 hose->io_base_phys = cpu_phys_addr - pci_addr; 930 /* handle from 0 to top of I/O window */ 931 hose->pci_io_size = pci_addr + size; 932 933 res = &hose->io_resource; 934 res->flags = IORESOURCE_IO; 935 res->start = pci_addr; 936 DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number, 937 res->start, res->start + size - 1); 938 break; 939 case 2: /* memory space */ 940 memno = 0; 941 while (memno < 3 && hose->mem_resources[memno].flags) 942 ++memno; 943 944 if (memno == 0) 945 hose->pci_mem_offset = cpu_phys_addr - pci_addr; 946 if (memno < 3) { 947 res = &hose->mem_resources[memno]; 948 res->flags = IORESOURCE_MEM; 949 res->start = cpu_phys_addr; 950 DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number, 951 res->start, res->start + size - 1); 952 } 953 break; 954 } 955 if (res != NULL) { 956 res->name = dev->full_name; 957 res->end = res->start + size - 1; 958 res->parent = NULL; 959 res->sibling = NULL; 960 res->child = NULL; 961 } 962 } 963 } 964 965 #ifdef CONFIG_HOTPLUG 966 967 int pcibios_unmap_io_space(struct pci_bus *bus) 968 { 969 struct pci_controller *hose; 970 971 WARN_ON(bus == NULL); 972 973 /* If this is not a PHB, we only flush the hash table over 974 * the area mapped by this bridge. We don't play with the PTE 975 * mappings since we might have to deal with sub-page alignemnts 976 * so flushing the hash table is the only sane way to make sure 977 * that no hash entries are covering that removed bridge area 978 * while still allowing other busses overlapping those pages 979 */ 980 if (bus->self) { 981 struct resource *res = bus->resource[0]; 982 983 DBG("IO unmapping for PCI-PCI bridge %s\n", 984 pci_name(bus->self)); 985 986 __flush_hash_table_range(&init_mm, res->start + _IO_BASE, 987 res->end - res->start + 1); 988 return 0; 989 } 990 991 /* Get the host bridge */ 992 hose = pci_bus_to_host(bus); 993 994 /* Check if we have IOs allocated */ 995 if (hose->io_base_alloc == 0) 996 return 0; 997 998 DBG("IO unmapping for PHB %s\n", 999 ((struct device_node *)hose->arch_data)->full_name); 1000 DBG(" alloc=0x%p\n", hose->io_base_alloc); 1001 1002 /* This is a PHB, we fully unmap the IO area */ 1003 vunmap(hose->io_base_alloc); 1004 1005 return 0; 1006 } 1007 EXPORT_SYMBOL_GPL(pcibios_unmap_io_space); 1008 1009 #endif /* CONFIG_HOTPLUG */ 1010 1011 int __devinit pcibios_map_io_space(struct pci_bus *bus) 1012 { 1013 struct vm_struct *area; 1014 unsigned long phys_page; 1015 unsigned long size_page; 1016 unsigned long io_virt_offset; 1017 struct pci_controller *hose; 1018 1019 WARN_ON(bus == NULL); 1020 1021 /* If this not a PHB, nothing to do, page tables still exist and 1022 * thus HPTEs will be faulted in when needed 1023 */ 1024 if (bus->self) { 1025 DBG("IO mapping for PCI-PCI bridge %s\n", 1026 pci_name(bus->self)); 1027 DBG(" virt=0x%016lx...0x%016lx\n", 1028 bus->resource[0]->start + _IO_BASE, 1029 bus->resource[0]->end + _IO_BASE); 1030 return 0; 1031 } 1032 1033 /* Get the host bridge */ 1034 hose = pci_bus_to_host(bus); 1035 phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); 1036 size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE); 1037 1038 /* Make sure IO area address is clear */ 1039 hose->io_base_alloc = NULL; 1040 1041 /* If there's no IO to map on that bus, get away too */ 1042 if (hose->pci_io_size == 0 || hose->io_base_phys == 0) 1043 return 0; 1044 1045 /* Let's allocate some IO space for that guy. We don't pass 1046 * VM_IOREMAP because we don't care about alignment tricks that 1047 * the core does in that case. Maybe we should due to stupid card 1048 * with incomplete address decoding but I'd rather not deal with 1049 * those outside of the reserved 64K legacy region. 1050 */ 1051 area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END); 1052 if (area == NULL) 1053 return -ENOMEM; 1054 hose->io_base_alloc = area->addr; 1055 hose->io_base_virt = (void __iomem *)(area->addr + 1056 hose->io_base_phys - phys_page); 1057 1058 DBG("IO mapping for PHB %s\n", 1059 ((struct device_node *)hose->arch_data)->full_name); 1060 DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n", 1061 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); 1062 DBG(" size=0x%016lx (alloc=0x%016lx)\n", 1063 hose->pci_io_size, size_page); 1064 1065 /* Establish the mapping */ 1066 if (__ioremap_at(phys_page, area->addr, size_page, 1067 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL) 1068 return -ENOMEM; 1069 1070 /* Fixup hose IO resource */ 1071 io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE; 1072 hose->io_resource.start += io_virt_offset; 1073 hose->io_resource.end += io_virt_offset; 1074 1075 DBG(" hose->io_resource=0x%016lx...0x%016lx\n", 1076 hose->io_resource.start, hose->io_resource.end); 1077 1078 return 0; 1079 } 1080 EXPORT_SYMBOL_GPL(pcibios_map_io_space); 1081 1082 static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) 1083 { 1084 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1085 unsigned long offset; 1086 1087 if (res->flags & IORESOURCE_IO) { 1088 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 1089 res->start += offset; 1090 res->end += offset; 1091 } else if (res->flags & IORESOURCE_MEM) { 1092 res->start += hose->pci_mem_offset; 1093 res->end += hose->pci_mem_offset; 1094 } 1095 } 1096 1097 void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, 1098 struct pci_bus *bus) 1099 { 1100 /* Update device resources. */ 1101 int i; 1102 1103 DBG("%s: Fixup resources:\n", pci_name(dev)); 1104 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 1105 struct resource *res = &dev->resource[i]; 1106 if (!res->flags) 1107 continue; 1108 1109 DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n", 1110 i, res->flags, res->start, res->end); 1111 1112 fixup_resource(res, dev); 1113 1114 DBG(" > %08lx:0x%016lx...0x%016lx\n", 1115 res->flags, res->start, res->end); 1116 } 1117 } 1118 EXPORT_SYMBOL(pcibios_fixup_device_resources); 1119 1120 void __devinit pcibios_setup_new_device(struct pci_dev *dev) 1121 { 1122 struct dev_archdata *sd = &dev->dev.archdata; 1123 1124 sd->of_node = pci_device_to_OF_node(dev); 1125 1126 DBG("PCI device %s OF node: %s\n", pci_name(dev), 1127 sd->of_node ? sd->of_node->full_name : "<none>"); 1128 1129 sd->dma_ops = pci_dma_ops; 1130 #ifdef CONFIG_NUMA 1131 sd->numa_node = pcibus_to_node(dev->bus); 1132 #else 1133 sd->numa_node = -1; 1134 #endif 1135 if (ppc_md.pci_dma_dev_setup) 1136 ppc_md.pci_dma_dev_setup(dev); 1137 } 1138 EXPORT_SYMBOL(pcibios_setup_new_device); 1139 1140 static void __devinit do_bus_setup(struct pci_bus *bus) 1141 { 1142 struct pci_dev *dev; 1143 1144 if (ppc_md.pci_dma_bus_setup) 1145 ppc_md.pci_dma_bus_setup(bus); 1146 1147 list_for_each_entry(dev, &bus->devices, bus_list) 1148 pcibios_setup_new_device(dev); 1149 1150 /* Read default IRQs and fixup if necessary */ 1151 list_for_each_entry(dev, &bus->devices, bus_list) { 1152 pci_read_irq_line(dev); 1153 if (ppc_md.pci_irq_fixup) 1154 ppc_md.pci_irq_fixup(dev); 1155 } 1156 } 1157 1158 void __devinit pcibios_fixup_bus(struct pci_bus *bus) 1159 { 1160 struct pci_dev *dev = bus->self; 1161 struct device_node *np; 1162 1163 np = pci_bus_to_OF_node(bus); 1164 1165 DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>"); 1166 1167 if (dev && pci_probe_only && 1168 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 1169 /* This is a subordinate bridge */ 1170 1171 pci_read_bridge_bases(bus); 1172 pcibios_fixup_device_resources(dev, bus); 1173 } 1174 1175 do_bus_setup(bus); 1176 1177 if (!pci_probe_only) 1178 return; 1179 1180 list_for_each_entry(dev, &bus->devices, bus_list) 1181 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) 1182 pcibios_fixup_device_resources(dev, bus); 1183 } 1184 EXPORT_SYMBOL(pcibios_fixup_bus); 1185 1186 /* 1187 * Reads the interrupt pin to determine if interrupt is use by card. 1188 * If the interrupt is used, then gets the interrupt line from the 1189 * openfirmware and sets it in the pci_dev and pci_config line. 1190 */ 1191 int pci_read_irq_line(struct pci_dev *pci_dev) 1192 { 1193 struct of_irq oirq; 1194 unsigned int virq; 1195 1196 DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 1197 1198 #ifdef DEBUG 1199 memset(&oirq, 0xff, sizeof(oirq)); 1200 #endif 1201 /* Try to get a mapping from the device-tree */ 1202 if (of_irq_map_pci(pci_dev, &oirq)) { 1203 u8 line, pin; 1204 1205 /* If that fails, lets fallback to what is in the config 1206 * space and map that through the default controller. We 1207 * also set the type to level low since that's what PCI 1208 * interrupts are. If your platform does differently, then 1209 * either provide a proper interrupt tree or don't use this 1210 * function. 1211 */ 1212 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) 1213 return -1; 1214 if (pin == 0) 1215 return -1; 1216 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || 1217 line == 0xff) { 1218 return -1; 1219 } 1220 DBG(" -> no map ! Using irq line %d from PCI config\n", line); 1221 1222 virq = irq_create_mapping(NULL, line); 1223 if (virq != NO_IRQ) 1224 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 1225 } else { 1226 DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 1227 oirq.size, oirq.specifier[0], oirq.specifier[1], 1228 oirq.controller->full_name); 1229 1230 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 1231 oirq.size); 1232 } 1233 if(virq == NO_IRQ) { 1234 DBG(" -> failed to map !\n"); 1235 return -1; 1236 } 1237 1238 DBG(" -> mapped to linux irq %d\n", virq); 1239 1240 pci_dev->irq = virq; 1241 1242 return 0; 1243 } 1244 EXPORT_SYMBOL(pci_read_irq_line); 1245 1246 void pci_resource_to_user(const struct pci_dev *dev, int bar, 1247 const struct resource *rsrc, 1248 resource_size_t *start, resource_size_t *end) 1249 { 1250 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1251 resource_size_t offset = 0; 1252 1253 if (hose == NULL) 1254 return; 1255 1256 if (rsrc->flags & IORESOURCE_IO) 1257 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 1258 1259 /* We pass a fully fixed up address to userland for MMIO instead of 1260 * a BAR value because X is lame and expects to be able to use that 1261 * to pass to /dev/mem ! 1262 * 1263 * That means that we'll have potentially 64 bits values where some 1264 * userland apps only expect 32 (like X itself since it thinks only 1265 * Sparc has 64 bits MMIO) but if we don't do that, we break it on 1266 * 32 bits CHRPs :-( 1267 * 1268 * Hopefully, the sysfs insterface is immune to that gunk. Once X 1269 * has been fixed (and the fix spread enough), we can re-enable the 1270 * 2 lines below and pass down a BAR value to userland. In that case 1271 * we'll also have to re-enable the matching code in 1272 * __pci_mmap_make_offset(). 1273 * 1274 * BenH. 1275 */ 1276 #if 0 1277 else if (rsrc->flags & IORESOURCE_MEM) 1278 offset = hose->pci_mem_offset; 1279 #endif 1280 1281 *start = rsrc->start - offset; 1282 *end = rsrc->end - offset; 1283 } 1284 1285 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) 1286 { 1287 if (!have_of) 1288 return NULL; 1289 while(node) { 1290 struct pci_controller *hose, *tmp; 1291 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 1292 if (hose->arch_data == node) 1293 return hose; 1294 node = node->parent; 1295 } 1296 return NULL; 1297 } 1298 1299 unsigned long pci_address_to_pio(phys_addr_t address) 1300 { 1301 struct pci_controller *hose, *tmp; 1302 1303 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 1304 if (address >= hose->io_base_phys && 1305 address < (hose->io_base_phys + hose->pci_io_size)) { 1306 unsigned long base = 1307 (unsigned long)hose->io_base_virt - _IO_BASE; 1308 return base + (address - hose->io_base_phys); 1309 } 1310 } 1311 return (unsigned int)-1; 1312 } 1313 EXPORT_SYMBOL_GPL(pci_address_to_pio); 1314 1315 1316 #define IOBASE_BRIDGE_NUMBER 0 1317 #define IOBASE_MEMORY 1 1318 #define IOBASE_IO 2 1319 #define IOBASE_ISA_IO 3 1320 #define IOBASE_ISA_MEM 4 1321 1322 long sys_pciconfig_iobase(long which, unsigned long in_bus, 1323 unsigned long in_devfn) 1324 { 1325 struct pci_controller* hose; 1326 struct list_head *ln; 1327 struct pci_bus *bus = NULL; 1328 struct device_node *hose_node; 1329 1330 /* Argh ! Please forgive me for that hack, but that's the 1331 * simplest way to get existing XFree to not lockup on some 1332 * G5 machines... So when something asks for bus 0 io base 1333 * (bus 0 is HT root), we return the AGP one instead. 1334 */ 1335 if (machine_is_compatible("MacRISC4")) 1336 if (in_bus == 0) 1337 in_bus = 0xf0; 1338 1339 /* That syscall isn't quite compatible with PCI domains, but it's 1340 * used on pre-domains setup. We return the first match 1341 */ 1342 1343 for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) { 1344 bus = pci_bus_b(ln); 1345 if (in_bus >= bus->number && in_bus <= bus->subordinate) 1346 break; 1347 bus = NULL; 1348 } 1349 if (bus == NULL || bus->sysdata == NULL) 1350 return -ENODEV; 1351 1352 hose_node = (struct device_node *)bus->sysdata; 1353 hose = PCI_DN(hose_node)->phb; 1354 1355 switch (which) { 1356 case IOBASE_BRIDGE_NUMBER: 1357 return (long)hose->first_busno; 1358 case IOBASE_MEMORY: 1359 return (long)hose->pci_mem_offset; 1360 case IOBASE_IO: 1361 return (long)hose->io_base_phys; 1362 case IOBASE_ISA_IO: 1363 return (long)isa_io_base; 1364 case IOBASE_ISA_MEM: 1365 return -EINVAL; 1366 } 1367 1368 return -EOPNOTSUPP; 1369 } 1370 1371 #ifdef CONFIG_NUMA 1372 int pcibus_to_node(struct pci_bus *bus) 1373 { 1374 struct pci_controller *phb = pci_bus_to_host(bus); 1375 return phb->node; 1376 } 1377 EXPORT_SYMBOL(pcibus_to_node); 1378 #endif 1379