/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;

static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);

/* pci_io_base -- the base address from which I/O BARs are offsets.
 * This is the lowest I/O base address (so BAR values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard-coded offsets. If no ISA bus exists, nothing
 * is mapped on the first 64K of I/O space.
 */
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

LIST_HEAD(hose_list);
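
/* The platform installs its DMA implementation here via set_pci_dma_ops();
 * pcibios_setup_new_device() hangs it off each new device's archdata below.
 */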
static struct dma_mapping_ops *pci_dma_ops;

/* XXX kill that some day ... */
int global_phb_number;		/* Global phb counter */

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

static void fixup_broken_pcnet32(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff..
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t start = res->start;
	unsigned long alignto;

	if (res->flags & IORESOURCE_IO) {
		unsigned long offset = (unsigned long)hose->io_base_virt -
					_IO_BASE;
		/* Make sure we start at our min on all hoses */
		if (start - offset < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + offset;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;

	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

		/* Align to multiple of size of minimum base. */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
	}

	res->start = start;
}
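
/* hose_spinlock protects global_phb_number and hose_list against concurrent
 * controller registration in pci_setup_pci_controller() and removal in
 * pcibios_free_controller().
 */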
static DEFINE_SPINLOCK(hose_spinlock);

/*
 * Initialize the common fields of a newly allocated pci_controller (PHB).
 */
static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
{
	memset(hose, 0, sizeof(struct pci_controller));

	spin_lock(&hose_spinlock);
	hose->global_number = global_phb_number++;
	list_add_tail(&hose->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
}

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	if (mem_init_done)
		phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
	else
		phb = alloc_bootmem(sizeof(struct pci_controller));
	if (phb == NULL)
		return NULL;
	pci_setup_pci_controller(phb);
	phb->arch_data = dev;
	phb->is_dynamic = mem_init_done;
	if (dev) {
		int nid = of_node_to_nid(dev);

		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
	return phb;
}

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}

void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif

static void __init pcibios_claim_of_setup(void)
{
	struct pci_bus *b;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}

static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
	const u32 *prop;
	int len;

	prop = of_get_property(np, name, &len);
	if (prop && len >= 4)
		return *prop;
	return def;
}

static unsigned int pci_parse_of_flags(u32 addr0)
{
	unsigned int flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return flags;
}

#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
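
/* Walk the "assigned-addresses" property of @node (5 cells per entry: a
 * flags/config word, then a 64-bit address and a 64-bit size) and fill in
 * the matching BAR or ROM entry of dev->resource[].
 */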
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct resource *res;
	const u32 *addrs;
	u32 i;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		base = GET_64BIT(addrs, 1);
		size = GET_64BIT(addrs, 3);
		if (!size)
			continue;
		i = addrs[0] & 0xff;
		DBG(" base: %llx, size: %llx, i: %x\n",
		    (unsigned long long)base, (unsigned long long)size, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = base;
		res->end = base + size - 1;
		res->flags = flags;
		res->name = pci_name(dev);
		fixup_resource(res, dev);
	}
}

struct pci_dev *of_create_pci_dev(struct device_node *node,
				  struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;
	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	DBG(" create device, devfn: %x, type: %s\n", devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */

	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);

	DBG(" class: 0x%x\n", dev->class);

	dev->current_state = 4;		/* unknown power state */
	dev->error_state = pci_channel_io_normal;

	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		/* Maybe do a default OF mapping here */
		dev->irq = NO_IRQ;
	}

	pci_parse_of_addrs(node, dev);

	DBG(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);
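
/* Scan the OF children of @node, creating a pci_dev for every child that
 * carries a valid "reg" property; P2P and CardBus bridges are recursed
 * into via of_scan_pci_bridge().
 */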
\n", node->full_name, bus->number); 400 401 while ((child = of_get_next_child(node, child)) != NULL) { 402 DBG(" * %s\n", child->full_name); 403 reg = of_get_property(child, "reg", ®len); 404 if (reg == NULL || reglen < 20) 405 continue; 406 devfn = (reg[0] >> 8) & 0xff; 407 408 /* create a new pci_dev for this device */ 409 dev = of_create_pci_dev(child, bus, devfn); 410 if (!dev) 411 continue; 412 DBG("dev header type: %x\n", dev->hdr_type); 413 414 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 415 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 416 of_scan_pci_bridge(child, dev); 417 } 418 419 do_bus_setup(bus); 420 } 421 EXPORT_SYMBOL(of_scan_bus); 422 423 void __devinit of_scan_pci_bridge(struct device_node *node, 424 struct pci_dev *dev) 425 { 426 struct pci_bus *bus; 427 const u32 *busrange, *ranges; 428 int len, i, mode; 429 struct resource *res; 430 unsigned int flags; 431 u64 size; 432 433 DBG("of_scan_pci_bridge(%s)\n", node->full_name); 434 435 /* parse bus-range property */ 436 busrange = of_get_property(node, "bus-range", &len); 437 if (busrange == NULL || len != 8) { 438 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", 439 node->full_name); 440 return; 441 } 442 ranges = of_get_property(node, "ranges", &len); 443 if (ranges == NULL) { 444 printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", 445 node->full_name); 446 return; 447 } 448 449 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 450 if (!bus) { 451 printk(KERN_ERR "Failed to create pci bus for %s\n", 452 node->full_name); 453 return; 454 } 455 456 bus->primary = dev->bus->number; 457 bus->subordinate = busrange[1]; 458 bus->bridge_ctl = 0; 459 bus->sysdata = node; 460 461 /* parse ranges property */ 462 /* PCI #address-cells == 3 and #size-cells == 2 always */ 463 res = &dev->resource[PCI_BRIDGE_RESOURCES]; 464 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { 465 res->flags = 0; 466 bus->resource[i] = res; 467 ++res; 468 } 469 i = 1; 470 for (; len >= 32; len -= 32, ranges += 8) { 471 flags = pci_parse_of_flags(ranges[0]); 472 size = GET_64BIT(ranges, 6); 473 if (flags == 0 || size == 0) 474 continue; 475 if (flags & IORESOURCE_IO) { 476 res = bus->resource[0]; 477 if (res->flags) { 478 printk(KERN_ERR "PCI: ignoring extra I/O range" 479 " for bridge %s\n", node->full_name); 480 continue; 481 } 482 } else { 483 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { 484 printk(KERN_ERR "PCI: too many memory ranges" 485 " for bridge %s\n", node->full_name); 486 continue; 487 } 488 res = bus->resource[i]; 489 ++i; 490 } 491 res->start = GET_64BIT(ranges, 1); 492 res->end = res->start + size - 1; 493 res->flags = flags; 494 fixup_resource(res, dev); 495 } 496 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 497 bus->number); 498 DBG(" bus name: %s\n", bus->name); 499 500 mode = PCI_PROBE_NORMAL; 501 if (ppc_md.pci_probe_mode) 502 mode = ppc_md.pci_probe_mode(bus); 503 DBG(" probe mode: %d\n", mode); 504 505 if (mode == PCI_PROBE_DEVTREE) 506 of_scan_bus(node, bus); 507 else if (mode == PCI_PROBE_NORMAL) 508 pci_scan_child_bus(bus); 509 } 510 EXPORT_SYMBOL(of_scan_pci_bridge); 511 512 void __devinit scan_phb(struct pci_controller *hose) 513 { 514 struct pci_bus *bus; 515 struct device_node *node = hose->arch_data; 516 int i, mode; 517 struct resource *res; 518 519 DBG("Scanning PHB %s\n", node ? 
node->full_name : "<NO NAME>"); 520 521 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); 522 if (bus == NULL) { 523 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", 524 hose->global_number); 525 return; 526 } 527 bus->secondary = hose->first_busno; 528 hose->bus = bus; 529 530 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 531 pcibios_map_io_space(bus); 532 533 bus->resource[0] = res = &hose->io_resource; 534 if (res->flags && request_resource(&ioport_resource, res)) { 535 printk(KERN_ERR "Failed to request PCI IO region " 536 "on PCI domain %04x\n", hose->global_number); 537 DBG("res->start = 0x%016lx, res->end = 0x%016lx\n", 538 res->start, res->end); 539 } 540 541 for (i = 0; i < 3; ++i) { 542 res = &hose->mem_resources[i]; 543 bus->resource[i+1] = res; 544 if (res->flags && request_resource(&iomem_resource, res)) 545 printk(KERN_ERR "Failed to request PCI memory region " 546 "on PCI domain %04x\n", hose->global_number); 547 } 548 549 mode = PCI_PROBE_NORMAL; 550 551 if (node && ppc_md.pci_probe_mode) 552 mode = ppc_md.pci_probe_mode(bus); 553 DBG(" probe mode: %d\n", mode); 554 if (mode == PCI_PROBE_DEVTREE) { 555 bus->subordinate = hose->last_busno; 556 of_scan_bus(node, bus); 557 } 558 559 if (mode == PCI_PROBE_NORMAL) 560 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); 561 } 562 563 static int __init pcibios_init(void) 564 { 565 struct pci_controller *hose, *tmp; 566 567 /* For now, override phys_mem_access_prot. If we need it, 568 * later, we may move that initialization to each ppc_md 569 */ 570 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; 571 572 if (firmware_has_feature(FW_FEATURE_ISERIES)) 573 iSeries_pcibios_init(); 574 575 printk(KERN_DEBUG "PCI: Probing PCI hardware\n"); 576 577 /* Scan all of the recorded PCI controllers. */ 578 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 579 scan_phb(hose); 580 pci_bus_add_devices(hose->bus); 581 } 582 583 if (!firmware_has_feature(FW_FEATURE_ISERIES)) { 584 if (pci_probe_only) 585 pcibios_claim_of_setup(); 586 else 587 /* FIXME: `else' will be removed when 588 pci_assign_unassigned_resources() is able to work 589 correctly with [partially] allocated PCI tree. */ 590 pci_assign_unassigned_resources(); 591 } 592 593 /* Call machine dependent final fixup */ 594 if (ppc_md.pcibios_fixup) 595 ppc_md.pcibios_fixup(); 596 597 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); 598 599 return 0; 600 } 601 602 subsys_initcall(pcibios_init); 603 604 char __init *pcibios_setup(char *str) 605 { 606 return str; 607 } 608 609 int pcibios_enable_device(struct pci_dev *dev, int mask) 610 { 611 u16 cmd, oldcmd; 612 int i; 613 614 pci_read_config_word(dev, PCI_COMMAND, &cmd); 615 oldcmd = cmd; 616 617 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 618 struct resource *res = &dev->resource[i]; 619 620 /* Only set up the requested stuff */ 621 if (!(mask & (1<<i))) 622 continue; 623 624 if (res->flags & IORESOURCE_IO) 625 cmd |= PCI_COMMAND_IO; 626 if (res->flags & IORESOURCE_MEM) 627 cmd |= PCI_COMMAND_MEMORY; 628 } 629 630 if (cmd != oldcmd) { 631 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", 632 pci_name(dev), cmd); 633 /* Enable the appropriate bits in the PCI command register. 
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1 << i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register. */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;
	else {
		struct pci_controller *hose = pci_bus_to_host(bus);
		return hose->buid;
	}
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address of the device they wish to
 * mmap by reading the 32-bit value from the config space base register,
 * adds whatever PAGE_SIZE multiple offset they wish, and feeds this into
 * the offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}
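
/* Note: a powerpc MMIO mapping normally wants _PAGE_NO_CACHE | _PAGE_GUARDED
 * for strongly ordered, non-speculative access; __pci_mmap_set_pgprot() below
 * drops _PAGE_GUARDED to let the CPU combine writes on prefetchable ranges.
 */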
/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use it
	 * to work around the inability of /sysfs to provide a write
	 * combine bit.
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	return __pgprot(prot);
}

/*
 * This one is used by /dev/mem and fbdev, which have no clue about the
 * PCI device; it tries to find the PCI device first and calls the
 * above routine.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}


/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture. The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}
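
/* sysfs "devspec" attribute: report the device's Open Firmware path, as
 * registered below by pcibios_add_platform_entries().
 */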
static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

void pcibios_add_platform_entries(struct pci_dev *pdev)
{
	device_create_file(&pdev->dev, &dev_attr_devspec);
}
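
/* Parse a host bridge's "ranges" property, merging contiguous entries, and
 * fill in the hose's io_resource/mem_resources windows along with the
 * io_base_phys and pci_mem_offset translations used by the fixup code.
 */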
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev, int prim)
{
	const unsigned int *ranges;
	unsigned int pci_space;
	unsigned long size;
	int rlen = 0;
	int memno = 0;
	struct resource *res;
	int np, na = of_n_addr_cells(dev);
	unsigned long pci_addr, cpu_phys_addr;

	np = na + 5;

	/* From "PCI Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;
	hose->io_base_phys = 0;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_space = ranges[0];
		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
		ranges += np;
		if (size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		while (rlen >= np * sizeof(unsigned int)) {
			unsigned long addr, phys;

			if (ranges[0] != pci_space)
				break;
			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
			phys = ranges[3];
			if (na >= 2)
				phys = (phys << 32) | ranges[4];
			if (addr != pci_addr + size ||
			    phys != cpu_phys_addr + size)
				break;

			size += ((unsigned long)ranges[na+3] << 32)
				| ranges[na+4];
			ranges += np;
			rlen -= np * sizeof(unsigned int);
		}

		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* I/O space */
			hose->io_base_phys = cpu_phys_addr - pci_addr;
			/* handle from 0 to top of I/O window */
			hose->pci_io_size = pci_addr + size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
			    res->start, res->start + size - 1);
			break;
		case 2:		/* memory space */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n",
				    hose->global_number,
				    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}

#ifdef CONFIG_HOTPLUG

int pcibios_unmap_io_space(struct pci_bus *bus)
{
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, we only flush the hash table over
	 * the area mapped by this bridge. We don't play with the PTE
	 * mappings since we might have to deal with sub-page alignments
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other buses overlapping those pages.
	 */
	if (bus->self) {
		struct resource *res = bus->resource[0];

		DBG("IO unmapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));

		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end - res->start + 1);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);

	/* Check if we have IOs allocated */
	if (hose->io_base_alloc == 0)
		return 0;

	DBG("IO unmapping for PHB %s\n",
	    ((struct device_node *)hose->arch_data)->full_name);
	DBG(" alloc=0x%p\n", hose->io_base_alloc);

	/* This is a PHB, we fully unmap the IO area */
	vunmap(hose->io_base_alloc);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);

#endif /* CONFIG_HOTPLUG */
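
/* Allocate a chunk of the kernel's PHB IO window and map this hose's IO
 * space into it. Only PHBs get an explicit mapping here; bridge windows
 * keep their page tables and fault their HPTEs in on demand (see below).
 */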
int __devinit pcibios_map_io_space(struct pci_bus *bus)
{
	struct vm_struct *area;
	unsigned long phys_page;
	unsigned long size_page;
	unsigned long io_virt_offset;
	struct pci_controller *hose;

	WARN_ON(bus == NULL);

	/* If this is not a PHB, there is nothing to do; page tables still
	 * exist and thus HPTEs will be faulted in when needed.
	 */
	if (bus->self) {
		DBG("IO mapping for PCI-PCI bridge %s\n",
		    pci_name(bus->self));
		DBG(" virt=0x%016lx...0x%016lx\n",
		    bus->resource[0]->start + _IO_BASE,
		    bus->resource[0]->end + _IO_BASE);
		return 0;
	}

	/* Get the host bridge */
	hose = pci_bus_to_host(bus);
	phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
	size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);

	/* Make sure IO area address is clear */
	hose->io_base_alloc = NULL;

	/* If there's no IO to map on that bus, get away too */
	if (hose->pci_io_size == 0 || hose->io_base_phys == 0)
		return 0;

	/* Let's allocate some IO space for that guy. We don't pass
	 * VM_IOREMAP because we don't care about alignment tricks that
	 * the core does in that case. Maybe we should, due to stupid cards
	 * with incomplete address decoding, but I'd rather not deal with
	 * those outside of the reserved 64K legacy region.
	 */
	area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
	if (area == NULL)
		return -ENOMEM;
	hose->io_base_alloc = area->addr;
	hose->io_base_virt = (void __iomem *)(area->addr +
					      hose->io_base_phys - phys_page);

	DBG("IO mapping for PHB %s\n",
	    ((struct device_node *)hose->arch_data)->full_name);
	DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
	    hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
	DBG(" size=0x%016lx (alloc=0x%016lx)\n",
	    hose->pci_io_size, size_page);

	/* Establish the mapping */
	if (__ioremap_at(phys_page, area->addr, size_page,
			 _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)
		return -ENOMEM;

	/* Fixup hose IO resource */
	io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	hose->io_resource.start += io_virt_offset;
	hose->io_resource.end += io_virt_offset;

	DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
	    hose->io_resource.start, hose->io_resource.end);

	return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);

static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long offset;

	if (res->flags & IORESOURCE_IO) {
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		res->start += offset;
		res->end += offset;
	} else if (res->flags & IORESOURCE_MEM) {
		res->start += hose->pci_mem_offset;
		res->end += hose->pci_mem_offset;
	}
}

void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
					      struct pci_bus *bus)
{
	/* Update device resources. */
	int i;

	DBG("%s: Fixup resources:\n", pci_name(dev));
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];
		if (!res->flags)
			continue;

		DBG(" 0x%02x < %08lx:0x%016lx...0x%016lx\n",
		    i, res->flags, res->start, res->end);

		fixup_resource(res, dev);

		DBG(" > %08lx:0x%016lx...0x%016lx\n",
		    res->flags, res->start, res->end);
	}
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);

void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
	struct dev_archdata *sd = &dev->dev.archdata;

	sd->of_node = pci_device_to_OF_node(dev);

	DBG("PCI device %s OF node: %s\n", pci_name(dev),
	    sd->of_node ? sd->of_node->full_name : "<none>");

	sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
	sd->numa_node = pcibus_to_node(dev->bus);
#else
	sd->numa_node = -1;
#endif
	if (ppc_md.pci_dma_dev_setup)
		ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

static void __devinit do_bus_setup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (ppc_md.pci_dma_bus_setup)
		ppc_md.pci_dma_bus_setup(bus);

	list_for_each_entry(dev, &bus->devices, bus_list)
		pcibios_setup_new_device(dev);

	/* Read default IRQs and fixup if necessary */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_read_irq_line(dev);
		if (ppc_md.pci_irq_fixup)
			ppc_md.pci_irq_fixup(dev);
	}
}
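
/* Per-bus fixup called by the generic PCI code after probing a bus: runs
 * the platform DMA and IRQ setup via do_bus_setup() and, when pci_probe_only
 * is set, translates the firmware-assigned resources of the bus's devices.
 */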
np->full_name : "<???>"); 1150 1151 if (dev && pci_probe_only && 1152 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 1153 /* This is a subordinate bridge */ 1154 1155 pci_read_bridge_bases(bus); 1156 pcibios_fixup_device_resources(dev, bus); 1157 } 1158 1159 do_bus_setup(bus); 1160 1161 if (!pci_probe_only) 1162 return; 1163 1164 list_for_each_entry(dev, &bus->devices, bus_list) 1165 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) 1166 pcibios_fixup_device_resources(dev, bus); 1167 } 1168 EXPORT_SYMBOL(pcibios_fixup_bus); 1169 1170 /* 1171 * Reads the interrupt pin to determine if interrupt is use by card. 1172 * If the interrupt is used, then gets the interrupt line from the 1173 * openfirmware and sets it in the pci_dev and pci_config line. 1174 */ 1175 int pci_read_irq_line(struct pci_dev *pci_dev) 1176 { 1177 struct of_irq oirq; 1178 unsigned int virq; 1179 1180 DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 1181 1182 #ifdef DEBUG 1183 memset(&oirq, 0xff, sizeof(oirq)); 1184 #endif 1185 /* Try to get a mapping from the device-tree */ 1186 if (of_irq_map_pci(pci_dev, &oirq)) { 1187 u8 line, pin; 1188 1189 /* If that fails, lets fallback to what is in the config 1190 * space and map that through the default controller. We 1191 * also set the type to level low since that's what PCI 1192 * interrupts are. If your platform does differently, then 1193 * either provide a proper interrupt tree or don't use this 1194 * function. 1195 */ 1196 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) 1197 return -1; 1198 if (pin == 0) 1199 return -1; 1200 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || 1201 line == 0xff) { 1202 return -1; 1203 } 1204 DBG(" -> no map ! Using irq line %d from PCI config\n", line); 1205 1206 virq = irq_create_mapping(NULL, line); 1207 if (virq != NO_IRQ) 1208 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 1209 } else { 1210 DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 1211 oirq.size, oirq.specifier[0], oirq.specifier[1], 1212 oirq.controller->full_name); 1213 1214 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 1215 oirq.size); 1216 } 1217 if(virq == NO_IRQ) { 1218 DBG(" -> failed to map !\n"); 1219 return -1; 1220 } 1221 1222 DBG(" -> mapped to linux irq %d\n", virq); 1223 1224 pci_dev->irq = virq; 1225 1226 return 0; 1227 } 1228 EXPORT_SYMBOL(pci_read_irq_line); 1229 1230 void pci_resource_to_user(const struct pci_dev *dev, int bar, 1231 const struct resource *rsrc, 1232 resource_size_t *start, resource_size_t *end) 1233 { 1234 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1235 resource_size_t offset = 0; 1236 1237 if (hose == NULL) 1238 return; 1239 1240 if (rsrc->flags & IORESOURCE_IO) 1241 offset = (unsigned long)hose->io_base_virt - _IO_BASE; 1242 1243 /* We pass a fully fixed up address to userland for MMIO instead of 1244 * a BAR value because X is lame and expects to be able to use that 1245 * to pass to /dev/mem ! 1246 * 1247 * That means that we'll have potentially 64 bits values where some 1248 * userland apps only expect 32 (like X itself since it thinks only 1249 * Sparc has 64 bits MMIO) but if we don't do that, we break it on 1250 * 32 bits CHRPs :-( 1251 * 1252 * Hopefully, the sysfs insterface is immune to that gunk. Once X 1253 * has been fixed (and the fix spread enough), we can re-enable the 1254 * 2 lines below and pass down a BAR value to userland. In that case 1255 * we'll also have to re-enable the matching code in 1256 * __pci_mmap_make_offset(). 1257 * 1258 * BenH. 
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64-bit values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64-bit MMIO), but if we don't do that, we break it on
	 * 32-bit CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}

struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	if (!have_of)
		return NULL;
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->arch_data == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + hose->pci_io_size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);


#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller *hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lock up on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domain setups. We return the first match.
	 */
	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number && in_bus <= bus->subordinate)
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif