1 /* 2 * Port for PPC64 David Engebretsen, IBM Corp. 3 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. 4 * 5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM 6 * Rework, based on alpha PCI code. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 11 * 2 of the License, or (at your option) any later version. 12 */ 13 14 #undef DEBUG 15 16 #include <linux/kernel.h> 17 #include <linux/pci.h> 18 #include <linux/string.h> 19 #include <linux/init.h> 20 #include <linux/bootmem.h> 21 #include <linux/mm.h> 22 #include <linux/list.h> 23 #include <linux/syscalls.h> 24 #include <linux/irq.h> 25 26 #include <asm/processor.h> 27 #include <asm/io.h> 28 #include <asm/prom.h> 29 #include <asm/pci-bridge.h> 30 #include <asm/byteorder.h> 31 #include <asm/machdep.h> 32 #include <asm/ppc-pci.h> 33 #include <asm/firmware.h> 34 35 #ifdef DEBUG 36 #include <asm/udbg.h> 37 #define DBG(fmt...) printk(fmt) 38 #else 39 #define DBG(fmt...) 40 #endif 41 42 unsigned long pci_probe_only = 1; 43 int pci_assign_all_buses = 0; 44 45 static void fixup_resource(struct resource *res, struct pci_dev *dev); 46 static void do_bus_setup(struct pci_bus *bus); 47 static void phbs_remap_io(void); 48 49 /* pci_io_base -- the base address from which io bars are offsets. 50 * This is the lowest I/O base address (so bar values are always positive), 51 * and it *must* be the start of ISA space if an ISA bus exists because 52 * ISA drivers use hard coded offsets. If no ISA bus exists a dummy 53 * page is mapped and isa_io_limit prevents access to it. 
 */
unsigned long isa_io_base;	/* NULL if no ISA bus */
EXPORT_SYMBOL(isa_io_base);
unsigned long pci_io_base;
EXPORT_SYMBOL(pci_io_base);

void iSeries_pcibios_init(void);

/* All PHBs in the system; guarded by hose_spinlock (defined below). */
LIST_HEAD(hose_list);

struct dma_mapping_ops *pci_dma_ops;
EXPORT_SYMBOL(pci_dma_ops);

int global_phb_number;		/* Global phb counter */

/* Cached ISA bridge dev. */
struct pci_dev *ppc64_isabridge_dev = NULL;
EXPORT_SYMBOL_GPL(ppc64_isabridge_dev);

/* Quirk: some pcnet32 parts show up with a bogus Trident vendor ID.
 * For any Trident-ID device of Ethernet class, rewrite the vendor ID
 * back to AMD both in the cached pci_dev and in config space. */
static void fixup_broken_pcnet32(struct pci_dev* dev)
{
	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

/* Convert a resource (CPU view) into a bus-address region by subtracting
 * the owning hose's I/O or memory offset. */
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

/* Inverse of pcibios_resource_to_bus(): add the per-hose offset back to
 * turn a bus-address region into a CPU-view resource. */
void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			     struct pci_bus_region *region)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and
other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have be mirrored at 0x0100-0x03ff..
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t start = res->start;
	unsigned long alignto;

	if (res->flags & IORESOURCE_IO) {
		/* Per-hose shift between CPU-virtual I/O and bus I/O. */
		unsigned long offset = (unsigned long)hose->io_base_virt -
					pci_io_base;
		/* Make sure we start at our min on all hoses */
		if (start - offset < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + offset;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;

	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

		/* Align to multiple of size of minimum base. */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
	}

	res->start = start;
}

/* Protects hose_list and global_phb_number. */
static DEFINE_SPINLOCK(hose_spinlock);

/*
 * pci_controller(phb) initialized common variables.
 */
static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
{
	memset(hose, 0, sizeof(struct pci_controller));

	/* Serialise global-number allocation and hose_list insertion. */
	spin_lock(&hose_spinlock);
	hose->global_number = global_phb_number++;
	list_add_tail(&hose->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
}

/* Allocate and initialise a PHB for the given device node.  Uses the
 * bootmem allocator when called before the MM is up (mem_init_done == 0);
 * such early PHBs are marked non-dynamic and are never freed. */
struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	if (mem_init_done)
		phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
	else
		phb = alloc_bootmem(sizeof (struct pci_controller));
	if (phb == NULL)
		return NULL;
	pci_setup_pci_controller(phb);
	phb->arch_data = dev;
	phb->is_dynamic = mem_init_done;
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Fall back to "no node" when the node is absent/offline. */
		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
	return phb;
}

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	/* Bootmem-allocated PHBs (is_dynamic == 0) cannot be kfree'd. */
	if (phb->is_dynamic)
		kfree(phb);
}

/* Claim the firmware-assigned resources of every device on bus b,
 * then recurse into all child buses. */
void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			/* Skip already-claimed, unset or flagless BARs. */
			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif

static void __init pcibios_claim_of_setup(void)
{
	struct pci_bus *b;

	/* iSeries firmware manages resource assignment itself. */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}

/* Read a 32-bit OF property; return def when absent or too short. */
static u32 get_int_prop(struct device_node *np, const char
			*name, u32 def)
{
	const u32 *prop;
	int len;

	prop = get_property(np, name, &len);
	if (prop && len >= 4)
		return *prop;
	return def;
}

/* Decode the first (config) cell of an OF PCI address into resource
 * flags: memory vs I/O space, 64-bit, below-1M, prefetchable. */
static unsigned int pci_parse_of_flags(u32 addr0)
{
	unsigned int flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return flags;
}

/* Combine two consecutive 32-bit property cells into one 64-bit value. */
#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])

/* Populate dev->resource[] from the OF "assigned-addresses" property.
 * Each entry is 5 cells (20 bytes): config cell, 64-bit base, 64-bit size. */
static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct resource *res;
	const u32 *addrs;
	u32 i;
	int proplen;

	addrs = get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		base = GET_64BIT(addrs, 1);
		size = GET_64BIT(addrs, 3);
		if (!size)
			continue;
		/* Low byte of the config cell is the BAR config offset. */
		i = addrs[0] & 0xff;
		DBG(" base: %llx, size: %llx, i: %x\n",
		    (unsigned long long)base, (unsigned long long)size, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = base;
		res->end = base + size - 1;
		res->flags = flags;
		res->name = pci_name(dev);
		fixup_resource(res, dev);
	}
}

/* Build a struct pci_dev entirely from an OF device node, without
 * touching config space (used on the device-tree probe path). */
struct pci_dev *of_create_pci_dev(struct device_node *node,
				 struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	type = get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	DBG(" create device, devfn: %x, type: %s\n", devfn, type);

	dev->bus = bus;
	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */

	/* IDs come from the device tree, not from config reads. */
	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);

	DBG(" class: 0x%x\n", dev->class);

	dev->current_state = 4;		/* unknown power state */
	dev->error_state = pci_channel_io_normal;

	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		/* Maybe do a default OF mapping here */
		dev->irq = NO_IRQ;
	}

	pci_parse_of_addrs(node, dev);

	DBG(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

/* Walk the children of an OF bus node, creating a pci_dev for each and
 * recursing into PCI-PCI / CardBus bridges. */
void __devinit of_scan_bus(struct device_node *node,
			   struct pci_bus *bus)
{
	struct device_node *child = NULL;
	const u32 *reg;
	int
reglen, devfn; 394 struct pci_dev *dev; 395 396 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); 397 398 while ((child = of_get_next_child(node, child)) != NULL) { 399 DBG(" * %s\n", child->full_name); 400 reg = get_property(child, "reg", ®len); 401 if (reg == NULL || reglen < 20) 402 continue; 403 devfn = (reg[0] >> 8) & 0xff; 404 405 /* create a new pci_dev for this device */ 406 dev = of_create_pci_dev(child, bus, devfn); 407 if (!dev) 408 continue; 409 DBG("dev header type: %x\n", dev->hdr_type); 410 411 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 412 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 413 of_scan_pci_bridge(child, dev); 414 } 415 416 do_bus_setup(bus); 417 } 418 EXPORT_SYMBOL(of_scan_bus); 419 420 void __devinit of_scan_pci_bridge(struct device_node *node, 421 struct pci_dev *dev) 422 { 423 struct pci_bus *bus; 424 const u32 *busrange, *ranges; 425 int len, i, mode; 426 struct resource *res; 427 unsigned int flags; 428 u64 size; 429 430 DBG("of_scan_pci_bridge(%s)\n", node->full_name); 431 432 /* parse bus-range property */ 433 busrange = get_property(node, "bus-range", &len); 434 if (busrange == NULL || len != 8) { 435 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", 436 node->full_name); 437 return; 438 } 439 ranges = get_property(node, "ranges", &len); 440 if (ranges == NULL) { 441 printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", 442 node->full_name); 443 return; 444 } 445 446 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 447 if (!bus) { 448 printk(KERN_ERR "Failed to create pci bus for %s\n", 449 node->full_name); 450 return; 451 } 452 453 bus->primary = dev->bus->number; 454 bus->subordinate = busrange[1]; 455 bus->bridge_ctl = 0; 456 bus->sysdata = node; 457 458 /* parse ranges property */ 459 /* PCI #address-cells == 3 and #size-cells == 2 always */ 460 res = &dev->resource[PCI_BRIDGE_RESOURCES]; 461 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { 462 res->flags = 0; 463 
		bus->resource[i] = res;
		++res;
	}
	i = 1;
	/* Each ranges entry is 8 cells (32 bytes): I/O ranges go into
	 * bus resource 0, memory windows into resources 1..N. */
	for (; len >= 32; len -= 32, ranges += 8) {
		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = GET_64BIT(ranges, 1);
		res->end = res->start + size - 1;
		res->flags = flags;
		fixup_resource(res, dev);
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	DBG(" bus name: %s\n", bus->name);

	/* Let the platform choose device-tree vs config-space probing. */
	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG(" probe mode: %d\n", mode);

	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

/* Create the root bus for a PHB, claim its I/O and memory windows with
 * the resource core, and scan it via the device tree or config space. */
void __devinit scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->arch_data;
	int i, mode;
	struct resource *res;

	DBG("Scanning PHB %s\n", node ?
	    node->full_name : "<NO NAME>");

	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	/* Claim the PHB's I/O window with the resource core. */
	bus->resource[0] = res = &hose->io_resource;
	if (res->flags && request_resource(&ioport_resource, res))
		printk(KERN_ERR "Failed to request PCI IO region "
		       "on PCI domain %04x\n", hose->global_number);

	/* ...and its (up to three) memory windows. */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		bus->resource[i+1] = res;
		if (res->flags && request_resource(&iomem_resource, res))
			printk(KERN_ERR "Failed to request PCI memory region "
			       "on PCI domain %04x\n", hose->global_number);
	}

	mode = PCI_PROBE_NORMAL;

	if (node && ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG(" probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE) {
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}

	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}

/* Boot-time PCI bring-up: scan every recorded PHB, claim or assign
 * resources, run machine fixups, cache the ISA bridge, map I/O space. */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	/* For now, override phys_mem_access_prot. If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		iSeries_pcibios_init();

	printk(KERN_DEBUG "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.
	 */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		scan_phb(hose);
		pci_bus_add_devices(hose->bus);
	}

	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		if (pci_probe_only)
			pcibios_claim_of_setup();
		else
			/* FIXME: `else' will be removed when
			   pci_assign_unassigned_resources() is able to work
			   correctly with [partially] allocated PCI tree. */
			pci_assign_unassigned_resources();
	}

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Cache the location of the ISA bridge (if we have one) */
	ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (ppc64_isabridge_dev != NULL)
		printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		/* map in PCI I/O space */
		phbs_remap_io();

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

/* No arch-specific "pci=" options are consumed; hand everything back
 * to the generic code. */
char __init *pcibios_setup(char *str)
{
	return str;
}

/* Enable I/O and/or memory decoding for the resources selected by
 * mask; PCI_COMMAND is written only when it actually changes. */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1<<i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register. */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 * Return the domain number for this bus.
641 */ 642 int pci_domain_nr(struct pci_bus *bus) 643 { 644 if (firmware_has_feature(FW_FEATURE_ISERIES)) 645 return 0; 646 else { 647 struct pci_controller *hose = pci_bus_to_host(bus); 648 649 return hose->global_number; 650 } 651 } 652 653 EXPORT_SYMBOL(pci_domain_nr); 654 655 /* Decide whether to display the domain number in /proc */ 656 int pci_proc_domain(struct pci_bus *bus) 657 { 658 if (firmware_has_feature(FW_FEATURE_ISERIES)) 659 return 0; 660 else { 661 struct pci_controller *hose = pci_bus_to_host(bus); 662 return hose->buid; 663 } 664 } 665 666 /* 667 * Platform support for /proc/bus/pci/X/Y mmap()s, 668 * modelled on the sparc64 implementation by Dave Miller. 669 * -- paulus. 670 */ 671 672 /* 673 * Adjust vm_pgoff of VMA such that it is the physical page offset 674 * corresponding to the 32-bit pci bus offset for DEV requested by the user. 675 * 676 * Basically, the user finds the base address for his device which he wishes 677 * to mmap. They read the 32-bit value from the config space base register, 678 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the 679 * offset parameter of mmap on /proc/bus/pci/XXX for that device. 680 * 681 * Returns negative error code on failure, zero on success. 
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource.
This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	return __pgprot(prot);
}

/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	/* RAM keeps its normal, cacheable protection. */
	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	/* Search every PCI device's memory resources for this address. */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		/* Prefetchable BARs may be mapped non-guarded. */
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}


/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.
The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* sysfs "devspec" attribute: the full OF node path for this device. */
static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

void pcibios_add_platform_entries(struct pci_dev *pdev)
{
	device_create_file(&pdev->dev, &dev_attr_devspec);
}

#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1

/* Map an ISA bridge node's I/O ranges into the PHB's I/O window
 * (falls back to mapping 64k when no usable ranges property exists). */
static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
						unsigned long phb_io_base_phys,
						void __iomem * phb_io_base_virt)
{
	/* Remove these asap */

	struct pci_address {
		u32 a_hi;
		u32 a_mid;
		u32 a_lo;
	};

	struct isa_address {
		u32 a_hi;
		u32 a_lo;
	};

	struct isa_range {
		struct isa_address isa_addr;
893 struct pci_address pci_addr; 894 unsigned int size; 895 }; 896 897 const struct isa_range *range; 898 unsigned long pci_addr; 899 unsigned int isa_addr; 900 unsigned int size; 901 int rlen = 0; 902 903 range = get_property(isa_node, "ranges", &rlen); 904 if (range == NULL || (rlen < sizeof(struct isa_range))) { 905 printk(KERN_ERR "no ISA ranges or unexpected isa range size," 906 "mapping 64k\n"); 907 __ioremap_explicit(phb_io_base_phys, 908 (unsigned long)phb_io_base_virt, 909 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED); 910 return; 911 } 912 913 /* From "ISA Binding to 1275" 914 * The ranges property is laid out as an array of elements, 915 * each of which comprises: 916 * cells 0 - 1: an ISA address 917 * cells 2 - 4: a PCI address 918 * (size depending on dev->n_addr_cells) 919 * cell 5: the size of the range 920 */ 921 if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) { 922 isa_addr = range->isa_addr.a_lo; 923 pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | 924 range->pci_addr.a_lo; 925 926 /* Assume these are both zero */ 927 if ((pci_addr != 0) || (isa_addr != 0)) { 928 printk(KERN_ERR "unexpected isa to pci mapping: %s\n", 929 __FUNCTION__); 930 return; 931 } 932 933 size = PAGE_ALIGN(range->size); 934 935 __ioremap_explicit(phb_io_base_phys, 936 (unsigned long) phb_io_base_virt, 937 size, _PAGE_NO_CACHE | _PAGE_GUARDED); 938 } 939 } 940 941 void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 942 struct device_node *dev, int prim) 943 { 944 const unsigned int *ranges; 945 unsigned int pci_space; 946 unsigned long size; 947 int rlen = 0; 948 int memno = 0; 949 struct resource *res; 950 int np, na = prom_n_addr_cells(dev); 951 unsigned long pci_addr, cpu_phys_addr; 952 953 np = na + 5; 954 955 /* From "PCI Binding to 1275" 956 * The ranges property is laid out as an array of elements, 957 * each of which comprises: 958 * cells 0 - 2: a PCI address 959 * cells 3 or 3+4: a CPU physical address 960 * (size depending on 
dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	ranges = get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;
	hose->io_base_phys = 0;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_space = ranges[0];
		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
		ranges += np;
		if (size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		while (rlen >= np * sizeof(unsigned int)) {
			unsigned long addr, phys;

			if (ranges[0] != pci_space)
				break;
			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
			phys = ranges[3];
			if (na >= 2)
				phys = (phys << 32) | ranges[4];
			if (addr != pci_addr + size ||
			    phys != cpu_phys_addr + size)
				break;

			size += ((unsigned long)ranges[na+3] << 32)
				| ranges[na+4];
			ranges += np;
			rlen -= np * sizeof(unsigned int);
		}

		/* Bits 24..25 of the config cell select the address space. */
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* I/O space */
			hose->io_base_phys = cpu_phys_addr;
			hose->pci_io_size = size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
			    res->start, res->start + size - 1);
			break;
		case 2:		/* memory space */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			/* First memory window defines the mem offset. */
			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
				    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}

/* Reserve the boot-time virtual window for a PHB's I/O space; for the
 * primary PHB this also establishes pci_io_base and, when an ISA bus
 * exists, isa_io_base and the ISA range mapping. */
void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;
	struct device_node *isa_dn;

	hose->io_base_virt = reserve_phb_iospace(size);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
	    hose->global_number, hose->io_base_phys,
	    (unsigned long) hose->io_base_virt);

	if (primary) {
		pci_io_base = (unsigned long)hose->io_base_virt;
		isa_dn = of_find_node_by_type(NULL, "isa");
		if (isa_dn) {
			isa_io_base = pci_io_base;
			pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
						  hose->io_base_virt);
			of_node_put(isa_dn);
		}
	}

	/* Shift the I/O resource into the virtual-address view. */
	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;
}

/* Hotplug variant of pci_setup_phb_io(): ioremaps the window instead of
 * carving it out of the reserved boot-time region. */
void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
					int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;

	hose->io_base_virt = __ioremap(hose->io_base_phys, size,
				       _PAGE_NO_CACHE | _PAGE_GUARDED);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
	    hose->global_number, hose->io_base_phys,
	    (unsigned long) hose->io_base_virt);

	if (primary)
		pci_io_base = (unsigned long)hose->io_base_virt;

	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;
}


/* Compute the physical start, virtual start and size of a bus's I/O
 * window; returns 0 on success, 1 when the region is malformed. */
static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
			    unsigned long *start_virt, unsigned long *size)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_bus_region region;
	struct resource *res;

	if (bus->self)
{ 1094 res = bus->resource[0]; 1095 pcibios_resource_to_bus(bus->self, ®ion, res); 1096 *start_phys = hose->io_base_phys + region.start; 1097 *start_virt = (unsigned long) hose->io_base_virt + 1098 region.start; 1099 if (region.end > region.start) 1100 *size = region.end - region.start + 1; 1101 else { 1102 printk("%s(): unexpected region 0x%lx->0x%lx\n", 1103 __FUNCTION__, region.start, region.end); 1104 return 1; 1105 } 1106 1107 } else { 1108 /* Root Bus */ 1109 res = &hose->io_resource; 1110 *start_phys = hose->io_base_phys; 1111 *start_virt = (unsigned long) hose->io_base_virt; 1112 if (res->end > res->start) 1113 *size = res->end - res->start + 1; 1114 else { 1115 printk("%s(): unexpected region 0x%lx->0x%lx\n", 1116 __FUNCTION__, res->start, res->end); 1117 return 1; 1118 } 1119 } 1120 1121 return 0; 1122 } 1123 1124 int unmap_bus_range(struct pci_bus *bus) 1125 { 1126 unsigned long start_phys; 1127 unsigned long start_virt; 1128 unsigned long size; 1129 1130 if (!bus) { 1131 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); 1132 return 1; 1133 } 1134 1135 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1136 return 1; 1137 if (__iounmap_explicit((void __iomem *) start_virt, size)) 1138 return 1; 1139 1140 return 0; 1141 } 1142 EXPORT_SYMBOL(unmap_bus_range); 1143 1144 int remap_bus_range(struct pci_bus *bus) 1145 { 1146 unsigned long start_phys; 1147 unsigned long start_virt; 1148 unsigned long size; 1149 1150 if (!bus) { 1151 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); 1152 return 1; 1153 } 1154 1155 1156 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1157 return 1; 1158 if (start_phys == 0) 1159 return 1; 1160 printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size); 1161 if (__ioremap_explicit(start_phys, start_virt, size, 1162 _PAGE_NO_CACHE | _PAGE_GUARDED)) 1163 return 1; 1164 1165 return 0; 1166 } 1167 EXPORT_SYMBOL(remap_bus_range); 1168 1169 static void phbs_remap_io(void) 1170 { 
1171 struct pci_controller *hose, *tmp; 1172 1173 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 1174 remap_bus_range(hose->bus); 1175 } 1176 1177 static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) 1178 { 1179 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1180 unsigned long offset; 1181 1182 if (res->flags & IORESOURCE_IO) { 1183 offset = (unsigned long)hose->io_base_virt - pci_io_base; 1184 1185 res->start += offset; 1186 res->end += offset; 1187 } else if (res->flags & IORESOURCE_MEM) { 1188 res->start += hose->pci_mem_offset; 1189 res->end += hose->pci_mem_offset; 1190 } 1191 } 1192 1193 void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, 1194 struct pci_bus *bus) 1195 { 1196 /* Update device resources. */ 1197 int i; 1198 1199 for (i = 0; i < PCI_NUM_RESOURCES; i++) 1200 if (dev->resource[i].flags) 1201 fixup_resource(&dev->resource[i], dev); 1202 } 1203 EXPORT_SYMBOL(pcibios_fixup_device_resources); 1204 1205 void __devinit pcibios_setup_new_device(struct pci_dev *dev) 1206 { 1207 struct dev_archdata *sd = &dev->dev.archdata; 1208 1209 sd->of_node = pci_device_to_OF_node(dev); 1210 1211 DBG("PCI device %s OF node: %s\n", pci_name(dev), 1212 sd->of_node ? 
sd->of_node->full_name : "<none>"); 1213 1214 sd->dma_ops = pci_dma_ops; 1215 #ifdef CONFIG_NUMA 1216 sd->numa_node = pcibus_to_node(dev->bus); 1217 #else 1218 sd->numa_node = -1; 1219 #endif 1220 if (ppc_md.pci_dma_dev_setup) 1221 ppc_md.pci_dma_dev_setup(dev); 1222 } 1223 EXPORT_SYMBOL(pcibios_setup_new_device); 1224 1225 static void __devinit do_bus_setup(struct pci_bus *bus) 1226 { 1227 struct pci_dev *dev; 1228 1229 if (ppc_md.pci_dma_bus_setup) 1230 ppc_md.pci_dma_bus_setup(bus); 1231 1232 list_for_each_entry(dev, &bus->devices, bus_list) 1233 pcibios_setup_new_device(dev); 1234 1235 /* Read default IRQs and fixup if necessary */ 1236 list_for_each_entry(dev, &bus->devices, bus_list) { 1237 pci_read_irq_line(dev); 1238 if (ppc_md.pci_irq_fixup) 1239 ppc_md.pci_irq_fixup(dev); 1240 } 1241 } 1242 1243 void __devinit pcibios_fixup_bus(struct pci_bus *bus) 1244 { 1245 struct pci_dev *dev = bus->self; 1246 struct device_node *np; 1247 1248 np = pci_bus_to_OF_node(bus); 1249 1250 DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>"); 1251 1252 if (dev && pci_probe_only && 1253 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 1254 /* This is a subordinate bridge */ 1255 1256 pci_read_bridge_bases(bus); 1257 pcibios_fixup_device_resources(dev, bus); 1258 } 1259 1260 do_bus_setup(bus); 1261 1262 if (!pci_probe_only) 1263 return; 1264 1265 list_for_each_entry(dev, &bus->devices, bus_list) 1266 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) 1267 pcibios_fixup_device_resources(dev, bus); 1268 } 1269 EXPORT_SYMBOL(pcibios_fixup_bus); 1270 1271 /* 1272 * Reads the interrupt pin to determine if interrupt is use by card. 1273 * If the interrupt is used, then gets the interrupt line from the 1274 * openfirmware and sets it in the pci_dev and pci_config line. 
1275 */ 1276 int pci_read_irq_line(struct pci_dev *pci_dev) 1277 { 1278 struct of_irq oirq; 1279 unsigned int virq; 1280 1281 DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 1282 1283 #ifdef DEBUG 1284 memset(&oirq, 0xff, sizeof(oirq)); 1285 #endif 1286 /* Try to get a mapping from the device-tree */ 1287 if (of_irq_map_pci(pci_dev, &oirq)) { 1288 u8 line, pin; 1289 1290 /* If that fails, lets fallback to what is in the config 1291 * space and map that through the default controller. We 1292 * also set the type to level low since that's what PCI 1293 * interrupts are. If your platform does differently, then 1294 * either provide a proper interrupt tree or don't use this 1295 * function. 1296 */ 1297 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) 1298 return -1; 1299 if (pin == 0) 1300 return -1; 1301 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || 1302 line == 0xff) { 1303 return -1; 1304 } 1305 DBG(" -> no map ! Using irq line %d from PCI config\n", line); 1306 1307 virq = irq_create_mapping(NULL, line); 1308 if (virq != NO_IRQ) 1309 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 1310 } else { 1311 DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) 
on %s\n", 1312 oirq.size, oirq.specifier[0], oirq.specifier[1], 1313 oirq.controller->full_name); 1314 1315 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 1316 oirq.size); 1317 } 1318 if(virq == NO_IRQ) { 1319 DBG(" -> failed to map !\n"); 1320 return -1; 1321 } 1322 1323 DBG(" -> mapped to linux irq %d\n", virq); 1324 1325 pci_dev->irq = virq; 1326 1327 return 0; 1328 } 1329 EXPORT_SYMBOL(pci_read_irq_line); 1330 1331 void pci_resource_to_user(const struct pci_dev *dev, int bar, 1332 const struct resource *rsrc, 1333 resource_size_t *start, resource_size_t *end) 1334 { 1335 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1336 resource_size_t offset = 0; 1337 1338 if (hose == NULL) 1339 return; 1340 1341 if (rsrc->flags & IORESOURCE_IO) 1342 offset = (unsigned long)hose->io_base_virt - pci_io_base; 1343 1344 /* We pass a fully fixed up address to userland for MMIO instead of 1345 * a BAR value because X is lame and expects to be able to use that 1346 * to pass to /dev/mem ! 1347 * 1348 * That means that we'll have potentially 64 bits values where some 1349 * userland apps only expect 32 (like X itself since it thinks only 1350 * Sparc has 64 bits MMIO) but if we don't do that, we break it on 1351 * 32 bits CHRPs :-( 1352 * 1353 * Hopefully, the sysfs insterface is immune to that gunk. Once X 1354 * has been fixed (and the fix spread enough), we can re-enable the 1355 * 2 lines below and pass down a BAR value to userland. In that case 1356 * we'll also have to re-enable the matching code in 1357 * __pci_mmap_make_offset(). 1358 * 1359 * BenH. 
1360 */ 1361 #if 0 1362 else if (rsrc->flags & IORESOURCE_MEM) 1363 offset = hose->pci_mem_offset; 1364 #endif 1365 1366 *start = rsrc->start - offset; 1367 *end = rsrc->end - offset; 1368 } 1369 1370 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) 1371 { 1372 if (!have_of) 1373 return NULL; 1374 while(node) { 1375 struct pci_controller *hose, *tmp; 1376 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 1377 if (hose->arch_data == node) 1378 return hose; 1379 node = node->parent; 1380 } 1381 return NULL; 1382 } 1383 1384 unsigned long pci_address_to_pio(phys_addr_t address) 1385 { 1386 struct pci_controller *hose, *tmp; 1387 1388 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 1389 if (address >= hose->io_base_phys && 1390 address < (hose->io_base_phys + hose->pci_io_size)) { 1391 unsigned long base = 1392 (unsigned long)hose->io_base_virt - pci_io_base; 1393 return base + (address - hose->io_base_phys); 1394 } 1395 } 1396 return (unsigned int)-1; 1397 } 1398 EXPORT_SYMBOL_GPL(pci_address_to_pio); 1399 1400 1401 #define IOBASE_BRIDGE_NUMBER 0 1402 #define IOBASE_MEMORY 1 1403 #define IOBASE_IO 2 1404 #define IOBASE_ISA_IO 3 1405 #define IOBASE_ISA_MEM 4 1406 1407 long sys_pciconfig_iobase(long which, unsigned long in_bus, 1408 unsigned long in_devfn) 1409 { 1410 struct pci_controller* hose; 1411 struct list_head *ln; 1412 struct pci_bus *bus = NULL; 1413 struct device_node *hose_node; 1414 1415 /* Argh ! Please forgive me for that hack, but that's the 1416 * simplest way to get existing XFree to not lockup on some 1417 * G5 machines... So when something asks for bus 0 io base 1418 * (bus 0 is HT root), we return the AGP one instead. 1419 */ 1420 if (machine_is_compatible("MacRISC4")) 1421 if (in_bus == 0) 1422 in_bus = 0xf0; 1423 1424 /* That syscall isn't quite compatible with PCI domains, but it's 1425 * used on pre-domains setup. 
We return the first match 1426 */ 1427 1428 for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) { 1429 bus = pci_bus_b(ln); 1430 if (in_bus >= bus->number && in_bus <= bus->subordinate) 1431 break; 1432 bus = NULL; 1433 } 1434 if (bus == NULL || bus->sysdata == NULL) 1435 return -ENODEV; 1436 1437 hose_node = (struct device_node *)bus->sysdata; 1438 hose = PCI_DN(hose_node)->phb; 1439 1440 switch (which) { 1441 case IOBASE_BRIDGE_NUMBER: 1442 return (long)hose->first_busno; 1443 case IOBASE_MEMORY: 1444 return (long)hose->pci_mem_offset; 1445 case IOBASE_IO: 1446 return (long)hose->io_base_phys; 1447 case IOBASE_ISA_IO: 1448 return (long)isa_io_base; 1449 case IOBASE_ISA_MEM: 1450 return -EINVAL; 1451 } 1452 1453 return -EOPNOTSUPP; 1454 } 1455 1456 #ifdef CONFIG_NUMA 1457 int pcibus_to_node(struct pci_bus *bus) 1458 { 1459 struct pci_controller *phb = pci_bus_to_host(bus); 1460 return phb->node; 1461 } 1462 EXPORT_SYMBOL(pcibus_to_node); 1463 #endif 1464