1 /* 2 * Port for PPC64 David Engebretsen, IBM Corp. 3 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. 4 * 5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM 6 * Rework, based on alpha PCI code. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * as published by the Free Software Foundation; either version 11 * 2 of the License, or (at your option) any later version. 12 */ 13 14 #undef DEBUG 15 16 #include <linux/kernel.h> 17 #include <linux/pci.h> 18 #include <linux/string.h> 19 #include <linux/init.h> 20 #include <linux/bootmem.h> 21 #include <linux/mm.h> 22 #include <linux/list.h> 23 #include <linux/syscalls.h> 24 #include <linux/irq.h> 25 26 #include <asm/processor.h> 27 #include <asm/io.h> 28 #include <asm/prom.h> 29 #include <asm/pci-bridge.h> 30 #include <asm/byteorder.h> 31 #include <asm/machdep.h> 32 #include <asm/ppc-pci.h> 33 #include <asm/firmware.h> 34 35 #ifdef DEBUG 36 #include <asm/udbg.h> 37 #define DBG(fmt...) printk(fmt) 38 #else 39 #define DBG(fmt...) 40 #endif 41 42 unsigned long pci_probe_only = 1; 43 int pci_assign_all_buses = 0; 44 45 static void fixup_resource(struct resource *res, struct pci_dev *dev); 46 static void do_bus_setup(struct pci_bus *bus); 47 static void phbs_remap_io(void); 48 49 /* pci_io_base -- the base address from which io bars are offsets. 50 * This is the lowest I/O base address (so bar values are always positive), 51 * and it *must* be the start of ISA space if an ISA bus exists because 52 * ISA drivers use hard coded offsets. If no ISA bus exists a dummy 53 * page is mapped and isa_io_limit prevents access to it. 54 */ 55 unsigned long isa_io_base; /* NULL if no ISA bus */ 56 EXPORT_SYMBOL(isa_io_base); 57 unsigned long pci_io_base; 58 EXPORT_SYMBOL(pci_io_base); 59 60 void iSeries_pcibios_init(void); 61 62 LIST_HEAD(hose_list); 63 64 static struct dma_mapping_ops *pci_dma_ops; 65 66 int global_phb_number; /* Global phb counter */ 67 68 /* Cached ISA bridge dev. */ 69 struct pci_dev *ppc64_isabridge_dev = NULL; 70 EXPORT_SYMBOL_GPL(ppc64_isabridge_dev); 71 72 void set_pci_dma_ops(struct dma_mapping_ops *dma_ops) 73 { 74 pci_dma_ops = dma_ops; 75 } 76 77 struct dma_mapping_ops *get_pci_dma_ops(void) 78 { 79 return pci_dma_ops; 80 } 81 EXPORT_SYMBOL(get_pci_dma_ops); 82 83 static void fixup_broken_pcnet32(struct pci_dev* dev) 84 { 85 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { 86 dev->vendor = PCI_VENDOR_ID_AMD; 87 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); 88 } 89 } 90 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); 91 92 void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, 93 struct resource *res) 94 { 95 unsigned long offset = 0; 96 struct pci_controller *hose = pci_bus_to_host(dev->bus); 97 98 if (!hose) 99 return; 100 101 if (res->flags & IORESOURCE_IO) 102 offset = (unsigned long)hose->io_base_virt - pci_io_base; 103 104 if (res->flags & IORESOURCE_MEM) 105 offset = hose->pci_mem_offset; 106 107 region->start = res->start - offset; 108 region->end = res->end - offset; 109 } 110 111 void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, 112 struct pci_bus_region *region) 113 { 114 unsigned long offset = 0; 115 struct pci_controller *hose = pci_bus_to_host(dev->bus); 116 117 if (!hose) 118 return; 119 120 if (res->flags & IORESOURCE_IO) 121 offset = (unsigned long)hose->io_base_virt - pci_io_base; 122 123 if (res->flags & IORESOURCE_MEM) 124 offset = hose->pci_mem_offset; 125 126 res->start = region->start + offset; 127 res->end = region->end + offset; 128 } 129 130 #ifdef CONFIG_HOTPLUG 131 EXPORT_SYMBOL(pcibios_resource_to_bus); 132 EXPORT_SYMBOL(pcibios_bus_to_resource); 133 #endif 134 135 /* 136 * We need to avoid collisions with `mirrored' VGA ports 137 * and other strange ISA hardware, so we always want the 138 * addresses to be allocated in the 0x000-0x0ff region 139 * modulo 0x400. 140 * 141 * Why? Because some silly external IO cards only decode 142 * the low 10 bits of the IO address. The 0x00-0xff region 143 * is reserved for motherboard devices that decode all 16 144 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, 145 * but we want to try to avoid allocating at 0x2900-0x2bff 146 * which might have be mirrored at 0x0100-0x03ff.. 147 */ 148 void pcibios_align_resource(void *data, struct resource *res, 149 resource_size_t size, resource_size_t align) 150 { 151 struct pci_dev *dev = data; 152 struct pci_controller *hose = pci_bus_to_host(dev->bus); 153 resource_size_t start = res->start; 154 unsigned long alignto; 155 156 if (res->flags & IORESOURCE_IO) { 157 unsigned long offset = (unsigned long)hose->io_base_virt - 158 pci_io_base; 159 /* Make sure we start at our min on all hoses */ 160 if (start - offset < PCIBIOS_MIN_IO) 161 start = PCIBIOS_MIN_IO + offset; 162 163 /* 164 * Put everything into 0x00-0xff region modulo 0x400 165 */ 166 if (start & 0x300) 167 start = (start + 0x3ff) & ~0x3ff; 168 169 } else if (res->flags & IORESOURCE_MEM) { 170 /* Make sure we start at our min on all hoses */ 171 if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM) 172 start = PCIBIOS_MIN_MEM + hose->pci_mem_offset; 173 174 /* Align to multiple of size of minimum base. */ 175 alignto = max(0x1000UL, align); 176 start = ALIGN(start, alignto); 177 } 178 179 res->start = start; 180 } 181 182 static DEFINE_SPINLOCK(hose_spinlock); 183 184 /* 185 * pci_controller(phb) initialized common variables. 186 */ 187 static void __devinit pci_setup_pci_controller(struct pci_controller *hose) 188 { 189 memset(hose, 0, sizeof(struct pci_controller)); 190 191 spin_lock(&hose_spinlock); 192 hose->global_number = global_phb_number++; 193 list_add_tail(&hose->list_node, &hose_list); 194 spin_unlock(&hose_spinlock); 195 } 196 197 struct pci_controller * pcibios_alloc_controller(struct device_node *dev) 198 { 199 struct pci_controller *phb; 200 201 if (mem_init_done) 202 phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL); 203 else 204 phb = alloc_bootmem(sizeof (struct pci_controller)); 205 if (phb == NULL) 206 return NULL; 207 pci_setup_pci_controller(phb); 208 phb->arch_data = dev; 209 phb->is_dynamic = mem_init_done; 210 if (dev) { 211 int nid = of_node_to_nid(dev); 212 213 if (nid < 0 || !node_online(nid)) 214 nid = -1; 215 216 PHB_SET_NODE(phb, nid); 217 } 218 return phb; 219 } 220 221 void pcibios_free_controller(struct pci_controller *phb) 222 { 223 spin_lock(&hose_spinlock); 224 list_del(&phb->list_node); 225 spin_unlock(&hose_spinlock); 226 227 if (phb->is_dynamic) 228 kfree(phb); 229 } 230 231 void __devinit pcibios_claim_one_bus(struct pci_bus *b) 232 { 233 struct pci_dev *dev; 234 struct pci_bus *child_bus; 235 236 list_for_each_entry(dev, &b->devices, bus_list) { 237 int i; 238 239 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 240 struct resource *r = &dev->resource[i]; 241 242 if (r->parent || !r->start || !r->flags) 243 continue; 244 pci_claim_resource(dev, i); 245 } 246 } 247 248 list_for_each_entry(child_bus, &b->children, node) 249 pcibios_claim_one_bus(child_bus); 250 } 251 #ifdef CONFIG_HOTPLUG 252 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); 253 #endif 254 255 static void __init pcibios_claim_of_setup(void) 256 { 257 struct pci_bus *b; 258 259 if (firmware_has_feature(FW_FEATURE_ISERIES)) 260 return; 261 262 list_for_each_entry(b, &pci_root_buses, node) 263 pcibios_claim_one_bus(b); 264 } 265 266 static u32 get_int_prop(struct device_node *np, const char *name, u32 def) 267 { 268 const u32 *prop; 269 int len; 270 271 prop = of_get_property(np, name, &len); 272 if (prop && len >= 4) 273 return *prop; 274 return def; 275 } 276 277 static unsigned int pci_parse_of_flags(u32 addr0) 278 { 279 unsigned int flags = 0; 280 281 if (addr0 & 0x02000000) { 282 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; 283 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; 284 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; 285 if (addr0 & 0x40000000) 286 flags |= IORESOURCE_PREFETCH 287 | PCI_BASE_ADDRESS_MEM_PREFETCH; 288 } else if (addr0 & 0x01000000) 289 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; 290 return flags; 291 } 292 293 #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) 294 295 static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) 296 { 297 u64 base, size; 298 unsigned int flags; 299 struct resource *res; 300 const u32 *addrs; 301 u32 i; 302 int proplen; 303 304 addrs = of_get_property(node, "assigned-addresses", &proplen); 305 if (!addrs) 306 return; 307 DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); 308 for (; proplen >= 20; proplen -= 20, addrs += 5) { 309 flags = pci_parse_of_flags(addrs[0]); 310 if (!flags) 311 continue; 312 base = GET_64BIT(addrs, 1); 313 size = GET_64BIT(addrs, 3); 314 if (!size) 315 continue; 316 i = addrs[0] & 0xff; 317 DBG(" base: %llx, size: %llx, i: %x\n", 318 (unsigned long long)base, (unsigned long long)size, i); 319 320 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 321 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 322 } else if (i == dev->rom_base_reg) { 323 res = &dev->resource[PCI_ROM_RESOURCE]; 324 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; 325 } else { 326 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); 327 continue; 328 } 329 res->start = base; 330 res->end = base + size - 1; 331 res->flags = flags; 332 res->name = pci_name(dev); 333 fixup_resource(res, dev); 334 } 335 } 336 337 struct pci_dev *of_create_pci_dev(struct device_node *node, 338 struct pci_bus *bus, int devfn) 339 { 340 struct pci_dev *dev; 341 const char *type; 342 343 dev = alloc_pci_dev(); 344 if (!dev) 345 return NULL; 346 type = of_get_property(node, "device_type", NULL); 347 if (type == NULL) 348 type = ""; 349 350 DBG(" create device, devfn: %x, type: %s\n", devfn, type); 351 352 dev->bus = bus; 353 dev->sysdata = node; 354 dev->dev.parent = bus->bridge; 355 dev->dev.bus = &pci_bus_type; 356 dev->devfn = devfn; 357 dev->multifunction = 0; /* maybe a lie? */ 358 359 dev->vendor = get_int_prop(node, "vendor-id", 0xffff); 360 dev->device = get_int_prop(node, "device-id", 0xffff); 361 dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0); 362 dev->subsystem_device = get_int_prop(node, "subsystem-id", 0); 363 364 dev->cfg_size = pci_cfg_space_size(dev); 365 366 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), 367 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 368 dev->class = get_int_prop(node, "class-code", 0); 369 370 DBG(" class: 0x%x\n", dev->class); 371 372 dev->current_state = 4; /* unknown power state */ 373 dev->error_state = pci_channel_io_normal; 374 375 if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { 376 /* a PCI-PCI bridge */ 377 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; 378 dev->rom_base_reg = PCI_ROM_ADDRESS1; 379 } else if (!strcmp(type, "cardbus")) { 380 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; 381 } else { 382 dev->hdr_type = PCI_HEADER_TYPE_NORMAL; 383 dev->rom_base_reg = PCI_ROM_ADDRESS; 384 /* Maybe do a default OF mapping here */ 385 dev->irq = NO_IRQ; 386 } 387 388 pci_parse_of_addrs(node, dev); 389 390 DBG(" adding to system ...\n"); 391 392 pci_device_add(dev, bus); 393 394 return dev; 395 } 396 EXPORT_SYMBOL(of_create_pci_dev); 397 398 void __devinit of_scan_bus(struct device_node *node, 399 struct pci_bus *bus) 400 { 401 struct device_node *child = NULL; 402 const u32 *reg; 403 int reglen, devfn; 404 struct pci_dev *dev; 405 406 DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); 407 408 while ((child = of_get_next_child(node, child)) != NULL) { 409 DBG(" * %s\n", child->full_name); 410 reg = of_get_property(child, "reg", ®len); 411 if (reg == NULL || reglen < 20) 412 continue; 413 devfn = (reg[0] >> 8) & 0xff; 414 415 /* create a new pci_dev for this device */ 416 dev = of_create_pci_dev(child, bus, devfn); 417 if (!dev) 418 continue; 419 DBG("dev header type: %x\n", dev->hdr_type); 420 421 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 422 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 423 of_scan_pci_bridge(child, dev); 424 } 425 426 do_bus_setup(bus); 427 } 428 EXPORT_SYMBOL(of_scan_bus); 429 430 void __devinit of_scan_pci_bridge(struct device_node *node, 431 struct pci_dev *dev) 432 { 433 struct pci_bus *bus; 434 const u32 *busrange, *ranges; 435 int len, i, mode; 436 struct resource *res; 437 unsigned int flags; 438 u64 size; 439 440 DBG("of_scan_pci_bridge(%s)\n", node->full_name); 441 442 /* parse bus-range property */ 443 busrange = of_get_property(node, "bus-range", &len); 444 if (busrange == NULL || len != 8) { 445 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", 446 node->full_name); 447 return; 448 } 449 ranges = of_get_property(node, "ranges", &len); 450 if (ranges == NULL) { 451 printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", 452 node->full_name); 453 return; 454 } 455 456 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 457 if (!bus) { 458 printk(KERN_ERR "Failed to create pci bus for %s\n", 459 node->full_name); 460 return; 461 } 462 463 bus->primary = dev->bus->number; 464 bus->subordinate = busrange[1]; 465 bus->bridge_ctl = 0; 466 bus->sysdata = node; 467 468 /* parse ranges property */ 469 /* PCI #address-cells == 3 and #size-cells == 2 always */ 470 res = &dev->resource[PCI_BRIDGE_RESOURCES]; 471 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { 472 res->flags = 0; 473 bus->resource[i] = res; 474 ++res; 475 } 476 i = 1; 477 for (; len >= 32; len -= 32, ranges += 8) { 478 flags = pci_parse_of_flags(ranges[0]); 479 size = GET_64BIT(ranges, 6); 480 if (flags == 0 || size == 0) 481 continue; 482 if (flags & IORESOURCE_IO) { 483 res = bus->resource[0]; 484 if (res->flags) { 485 printk(KERN_ERR "PCI: ignoring extra I/O range" 486 " for bridge %s\n", node->full_name); 487 continue; 488 } 489 } else { 490 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { 491 printk(KERN_ERR "PCI: too many memory ranges" 492 " for bridge %s\n", node->full_name); 493 continue; 494 } 495 res = bus->resource[i]; 496 ++i; 497 } 498 res->start = GET_64BIT(ranges, 1); 499 res->end = res->start + size - 1; 500 res->flags = flags; 501 fixup_resource(res, dev); 502 } 503 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 504 bus->number); 505 DBG(" bus name: %s\n", bus->name); 506 507 mode = PCI_PROBE_NORMAL; 508 if (ppc_md.pci_probe_mode) 509 mode = ppc_md.pci_probe_mode(bus); 510 DBG(" probe mode: %d\n", mode); 511 512 if (mode == PCI_PROBE_DEVTREE) 513 of_scan_bus(node, bus); 514 else if (mode == PCI_PROBE_NORMAL) 515 pci_scan_child_bus(bus); 516 } 517 EXPORT_SYMBOL(of_scan_pci_bridge); 518 519 void __devinit scan_phb(struct pci_controller *hose) 520 { 521 struct pci_bus *bus; 522 struct device_node *node = hose->arch_data; 523 int i, mode; 524 struct resource *res; 525 526 DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); 527 528 bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node); 529 if (bus == NULL) { 530 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", 531 hose->global_number); 532 return; 533 } 534 bus->secondary = hose->first_busno; 535 hose->bus = bus; 536 537 bus->resource[0] = res = &hose->io_resource; 538 if (res->flags && request_resource(&ioport_resource, res)) 539 printk(KERN_ERR "Failed to request PCI IO region " 540 "on PCI domain %04x\n", hose->global_number); 541 542 for (i = 0; i < 3; ++i) { 543 res = &hose->mem_resources[i]; 544 bus->resource[i+1] = res; 545 if (res->flags && request_resource(&iomem_resource, res)) 546 printk(KERN_ERR "Failed to request PCI memory region " 547 "on PCI domain %04x\n", hose->global_number); 548 } 549 550 mode = PCI_PROBE_NORMAL; 551 552 if (node && ppc_md.pci_probe_mode) 553 mode = ppc_md.pci_probe_mode(bus); 554 DBG(" probe mode: %d\n", mode); 555 if (mode == PCI_PROBE_DEVTREE) { 556 bus->subordinate = hose->last_busno; 557 of_scan_bus(node, bus); 558 } 559 560 if (mode == PCI_PROBE_NORMAL) 561 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); 562 } 563 564 static int __init pcibios_init(void) 565 { 566 struct pci_controller *hose, *tmp; 567 568 /* For now, override phys_mem_access_prot. If we need it, 569 * later, we may move that initialization to each ppc_md 570 */ 571 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; 572 573 if (firmware_has_feature(FW_FEATURE_ISERIES)) 574 iSeries_pcibios_init(); 575 576 printk(KERN_DEBUG "PCI: Probing PCI hardware\n"); 577 578 /* Scan all of the recorded PCI controllers. */ 579 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 580 scan_phb(hose); 581 pci_bus_add_devices(hose->bus); 582 } 583 584 if (!firmware_has_feature(FW_FEATURE_ISERIES)) { 585 if (pci_probe_only) 586 pcibios_claim_of_setup(); 587 else 588 /* FIXME: `else' will be removed when 589 pci_assign_unassigned_resources() is able to work 590 correctly with [partially] allocated PCI tree. */ 591 pci_assign_unassigned_resources(); 592 } 593 594 /* Call machine dependent final fixup */ 595 if (ppc_md.pcibios_fixup) 596 ppc_md.pcibios_fixup(); 597 598 /* Cache the location of the ISA bridge (if we have one) */ 599 ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 600 if (ppc64_isabridge_dev != NULL) 601 printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev)); 602 603 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 604 /* map in PCI I/O space */ 605 phbs_remap_io(); 606 607 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); 608 609 return 0; 610 } 611 612 subsys_initcall(pcibios_init); 613 614 char __init *pcibios_setup(char *str) 615 { 616 return str; 617 } 618 619 int pcibios_enable_device(struct pci_dev *dev, int mask) 620 { 621 u16 cmd, oldcmd; 622 int i; 623 624 pci_read_config_word(dev, PCI_COMMAND, &cmd); 625 oldcmd = cmd; 626 627 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 628 struct resource *res = &dev->resource[i]; 629 630 /* Only set up the requested stuff */ 631 if (!(mask & (1<<i))) 632 continue; 633 634 if (res->flags & IORESOURCE_IO) 635 cmd |= PCI_COMMAND_IO; 636 if (res->flags & IORESOURCE_MEM) 637 cmd |= PCI_COMMAND_MEMORY; 638 } 639 640 if (cmd != oldcmd) { 641 printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n", 642 pci_name(dev), cmd); 643 /* Enable the appropriate bits in the PCI command register. */ 644 pci_write_config_word(dev, PCI_COMMAND, cmd); 645 } 646 return 0; 647 } 648 649 /* 650 * Return the domain number for this bus. 651 */ 652 int pci_domain_nr(struct pci_bus *bus) 653 { 654 if (firmware_has_feature(FW_FEATURE_ISERIES)) 655 return 0; 656 else { 657 struct pci_controller *hose = pci_bus_to_host(bus); 658 659 return hose->global_number; 660 } 661 } 662 663 EXPORT_SYMBOL(pci_domain_nr); 664 665 /* Decide whether to display the domain number in /proc */ 666 int pci_proc_domain(struct pci_bus *bus) 667 { 668 if (firmware_has_feature(FW_FEATURE_ISERIES)) 669 return 0; 670 else { 671 struct pci_controller *hose = pci_bus_to_host(bus); 672 return hose->buid; 673 } 674 } 675 676 /* 677 * Platform support for /proc/bus/pci/X/Y mmap()s, 678 * modelled on the sparc64 implementation by Dave Miller. 679 * -- paulus. 680 */ 681 682 /* 683 * Adjust vm_pgoff of VMA such that it is the physical page offset 684 * corresponding to the 32-bit pci bus offset for DEV requested by the user. 685 * 686 * Basically, the user finds the base address for his device which he wishes 687 * to mmap. They read the 32-bit value from the config space base register, 688 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the 689 * offset parameter of mmap on /proc/bus/pci/XXX for that device. 690 * 691 * Returns negative error code on failure, zero on success. 692 */ 693 static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, 694 resource_size_t *offset, 695 enum pci_mmap_state mmap_state) 696 { 697 struct pci_controller *hose = pci_bus_to_host(dev->bus); 698 unsigned long io_offset = 0; 699 int i, res_bit; 700 701 if (hose == 0) 702 return NULL; /* should never happen */ 703 704 /* If memory, add on the PCI bridge address offset */ 705 if (mmap_state == pci_mmap_mem) { 706 #if 0 /* See comment in pci_resource_to_user() for why this is disabled */ 707 *offset += hose->pci_mem_offset; 708 #endif 709 res_bit = IORESOURCE_MEM; 710 } else { 711 io_offset = (unsigned long)hose->io_base_virt - pci_io_base; 712 *offset += io_offset; 713 res_bit = IORESOURCE_IO; 714 } 715 716 /* 717 * Check that the offset requested corresponds to one of the 718 * resources of the device. 719 */ 720 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 721 struct resource *rp = &dev->resource[i]; 722 int flags = rp->flags; 723 724 /* treat ROM as memory (should be already) */ 725 if (i == PCI_ROM_RESOURCE) 726 flags |= IORESOURCE_MEM; 727 728 /* Active and same type? */ 729 if ((flags & res_bit) == 0) 730 continue; 731 732 /* In the range of this resource? */ 733 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) 734 continue; 735 736 /* found it! construct the final physical address */ 737 if (mmap_state == pci_mmap_io) 738 *offset += hose->io_base_phys - io_offset; 739 return rp; 740 } 741 742 return NULL; 743 } 744 745 /* 746 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci 747 * device mapping. 748 */ 749 static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, 750 pgprot_t protection, 751 enum pci_mmap_state mmap_state, 752 int write_combine) 753 { 754 unsigned long prot = pgprot_val(protection); 755 756 /* Write combine is always 0 on non-memory space mappings. On 757 * memory space, if the user didn't pass 1, we check for a 758 * "prefetchable" resource. This is a bit hackish, but we use 759 * this to workaround the inability of /sysfs to provide a write 760 * combine bit 761 */ 762 if (mmap_state != pci_mmap_mem) 763 write_combine = 0; 764 else if (write_combine == 0) { 765 if (rp->flags & IORESOURCE_PREFETCH) 766 write_combine = 1; 767 } 768 769 /* XXX would be nice to have a way to ask for write-through */ 770 prot |= _PAGE_NO_CACHE; 771 if (write_combine) 772 prot &= ~_PAGE_GUARDED; 773 else 774 prot |= _PAGE_GUARDED; 775 776 return __pgprot(prot); 777 } 778 779 /* 780 * This one is used by /dev/mem and fbdev who have no clue about the 781 * PCI device, it tries to find the PCI device first and calls the 782 * above routine 783 */ 784 pgprot_t pci_phys_mem_access_prot(struct file *file, 785 unsigned long pfn, 786 unsigned long size, 787 pgprot_t protection) 788 { 789 struct pci_dev *pdev = NULL; 790 struct resource *found = NULL; 791 unsigned long prot = pgprot_val(protection); 792 unsigned long offset = pfn << PAGE_SHIFT; 793 int i; 794 795 if (page_is_ram(pfn)) 796 return __pgprot(prot); 797 798 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 799 800 for_each_pci_dev(pdev) { 801 for (i = 0; i <= PCI_ROM_RESOURCE; i++) { 802 struct resource *rp = &pdev->resource[i]; 803 int flags = rp->flags; 804 805 /* Active and same type? */ 806 if ((flags & IORESOURCE_MEM) == 0) 807 continue; 808 /* In the range of this resource? */ 809 if (offset < (rp->start & PAGE_MASK) || 810 offset > rp->end) 811 continue; 812 found = rp; 813 break; 814 } 815 if (found) 816 break; 817 } 818 if (found) { 819 if (found->flags & IORESOURCE_PREFETCH) 820 prot &= ~_PAGE_GUARDED; 821 pci_dev_put(pdev); 822 } 823 824 DBG("non-PCI map for %lx, prot: %lx\n", offset, prot); 825 826 return __pgprot(prot); 827 } 828 829 830 /* 831 * Perform the actual remap of the pages for a PCI device mapping, as 832 * appropriate for this architecture. The region in the process to map 833 * is described by vm_start and vm_end members of VMA, the base physical 834 * address is found in vm_pgoff. 835 * The pci device structure is provided so that architectures may make mapping 836 * decisions on a per-device or per-bus basis. 837 * 838 * Returns a negative error code on failure, zero on success. 839 */ 840 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, 841 enum pci_mmap_state mmap_state, int write_combine) 842 { 843 resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT; 844 struct resource *rp; 845 int ret; 846 847 rp = __pci_mmap_make_offset(dev, &offset, mmap_state); 848 if (rp == NULL) 849 return -EINVAL; 850 851 vma->vm_pgoff = offset >> PAGE_SHIFT; 852 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, 853 vma->vm_page_prot, 854 mmap_state, write_combine); 855 856 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 857 vma->vm_end - vma->vm_start, vma->vm_page_prot); 858 859 return ret; 860 } 861 862 static ssize_t pci_show_devspec(struct device *dev, 863 struct device_attribute *attr, char *buf) 864 { 865 struct pci_dev *pdev; 866 struct device_node *np; 867 868 pdev = to_pci_dev (dev); 869 np = pci_device_to_OF_node(pdev); 870 if (np == NULL || np->full_name == NULL) 871 return 0; 872 return sprintf(buf, "%s", np->full_name); 873 } 874 static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); 875 876 void pcibios_add_platform_entries(struct pci_dev *pdev) 877 { 878 device_create_file(&pdev->dev, &dev_attr_devspec); 879 } 880 881 #define ISA_SPACE_MASK 0x1 882 #define ISA_SPACE_IO 0x1 883 884 static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, 885 unsigned long phb_io_base_phys, 886 void __iomem * phb_io_base_virt) 887 { 888 /* Remove these asap */ 889 890 struct pci_address { 891 u32 a_hi; 892 u32 a_mid; 893 u32 a_lo; 894 }; 895 896 struct isa_address { 897 u32 a_hi; 898 u32 a_lo; 899 }; 900 901 struct isa_range { 902 struct isa_address isa_addr; 903 struct pci_address pci_addr; 904 unsigned int size; 905 }; 906 907 const struct isa_range *range; 908 unsigned long pci_addr; 909 unsigned int isa_addr; 910 unsigned int size; 911 int rlen = 0; 912 913 range = of_get_property(isa_node, "ranges", &rlen); 914 if (range == NULL || (rlen < sizeof(struct isa_range))) { 915 printk(KERN_ERR "no ISA ranges or unexpected isa range size," 916 "mapping 64k\n"); 917 __ioremap_explicit(phb_io_base_phys, 918 (unsigned long)phb_io_base_virt, 919 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED); 920 return; 921 } 922 923 /* From "ISA Binding to 1275" 924 * The ranges property is laid out as an array of elements, 925 * each of which comprises: 926 * cells 0 - 1: an ISA address 927 * cells 2 - 4: a PCI address 928 * (size depending on dev->n_addr_cells) 929 * cell 5: the size of the range 930 */ 931 if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) { 932 isa_addr = range->isa_addr.a_lo; 933 pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | 934 range->pci_addr.a_lo; 935 936 /* Assume these are both zero */ 937 if ((pci_addr != 0) || (isa_addr != 0)) { 938 printk(KERN_ERR "unexpected isa to pci mapping: %s\n", 939 __FUNCTION__); 940 return; 941 } 942 943 size = PAGE_ALIGN(range->size); 944 945 __ioremap_explicit(phb_io_base_phys, 946 (unsigned long) phb_io_base_virt, 947 size, _PAGE_NO_CACHE | _PAGE_GUARDED); 948 } 949 } 950 951 void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 952 struct device_node *dev, int prim) 953 { 954 const unsigned int *ranges; 955 unsigned int pci_space; 956 unsigned long size; 957 int rlen = 0; 958 int memno = 0; 959 struct resource *res; 960 int np, na = of_n_addr_cells(dev); 961 unsigned long pci_addr, cpu_phys_addr; 962 963 np = na + 5; 964 965 /* From "PCI Binding to 1275" 966 * The ranges property is laid out as an array of elements, 967 * each of which comprises: 968 * cells 0 - 2: a PCI address 969 * cells 3 or 3+4: a CPU physical address 970 * (size depending on dev->n_addr_cells) 971 * cells 4+5 or 5+6: the size of the range 972 */ 973 ranges = of_get_property(dev, "ranges", &rlen); 974 if (ranges == NULL) 975 return; 976 hose->io_base_phys = 0; 977 while ((rlen -= np * sizeof(unsigned int)) >= 0) { 978 res = NULL; 979 pci_space = ranges[0]; 980 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2]; 981 cpu_phys_addr = of_translate_address(dev, &ranges[3]); 982 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4]; 983 ranges += np; 984 if (size == 0) 985 continue; 986 987 /* Now consume following elements while they are contiguous */ 988 while (rlen >= np * sizeof(unsigned int)) { 989 unsigned long addr, phys; 990 991 if (ranges[0] != pci_space) 992 break; 993 addr = ((unsigned long)ranges[1] << 32) | ranges[2]; 994 phys = ranges[3]; 995 if (na >= 2) 996 phys = (phys << 32) | ranges[4]; 997 if (addr != pci_addr + size || 998 phys != cpu_phys_addr + size) 999 break; 1000 1001 size += ((unsigned long)ranges[na+3] << 32) 1002 | ranges[na+4]; 1003 ranges += np; 1004 rlen -= np * sizeof(unsigned int); 1005 } 1006 1007 switch ((pci_space >> 24) & 0x3) { 1008 case 1: /* I/O space */ 1009 hose->io_base_phys = cpu_phys_addr; 1010 hose->pci_io_size = size; 1011 1012 res = &hose->io_resource; 1013 res->flags = IORESOURCE_IO; 1014 res->start = pci_addr; 1015 DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number, 1016 res->start, res->start + size - 1); 1017 break; 1018 case 2: /* memory space */ 1019 memno = 0; 1020 while (memno < 3 && hose->mem_resources[memno].flags) 1021 ++memno; 1022 1023 if (memno == 0) 1024 hose->pci_mem_offset = cpu_phys_addr - pci_addr; 1025 if (memno < 3) { 1026 res = &hose->mem_resources[memno]; 1027 res->flags = IORESOURCE_MEM; 1028 res->start = cpu_phys_addr; 1029 DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number, 1030 res->start, res->start + size - 1); 1031 } 1032 break; 1033 } 1034 if (res != NULL) { 1035 res->name = dev->full_name; 1036 res->end = res->start + size - 1; 1037 res->parent = NULL; 1038 res->sibling = NULL; 1039 res->child = NULL; 1040 } 1041 } 1042 } 1043 1044 void __init pci_setup_phb_io(struct pci_controller *hose, int primary) 1045 { 1046 unsigned long size = hose->pci_io_size; 1047 unsigned long io_virt_offset; 1048 struct resource *res; 1049 struct device_node *isa_dn; 1050 1051 hose->io_base_virt = reserve_phb_iospace(size); 1052 DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n", 1053 hose->global_number, hose->io_base_phys, 1054 (unsigned long) hose->io_base_virt); 1055 1056 if (primary) { 1057 pci_io_base = (unsigned long)hose->io_base_virt; 1058 isa_dn = of_find_node_by_type(NULL, "isa"); 1059 if (isa_dn) { 1060 isa_io_base = pci_io_base; 1061 pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys, 1062 hose->io_base_virt); 1063 of_node_put(isa_dn); 1064 } 1065 } 1066 1067 io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base; 1068 res = &hose->io_resource; 1069 res->start += io_virt_offset; 1070 res->end += io_virt_offset; 1071 } 1072 1073 void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose, 1074 int primary) 1075 { 1076 unsigned long size = hose->pci_io_size; 1077 unsigned long io_virt_offset; 1078 struct resource *res; 1079 1080 hose->io_base_virt = __ioremap(hose->io_base_phys, size, 1081 _PAGE_NO_CACHE | _PAGE_GUARDED); 1082 DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n", 1083 hose->global_number, hose->io_base_phys, 1084 (unsigned long) hose->io_base_virt); 1085 1086 if (primary) 1087 pci_io_base = (unsigned long)hose->io_base_virt; 1088 1089 io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base; 1090 res = &hose->io_resource; 1091 res->start += io_virt_offset; 1092 res->end += io_virt_offset; 1093 } 1094 1095 1096 static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys, 1097 unsigned long *start_virt, unsigned long *size) 1098 { 1099 struct pci_controller *hose = pci_bus_to_host(bus); 1100 struct pci_bus_region region; 1101 struct resource *res; 1102 1103 if (bus->self) { 1104 res = bus->resource[0]; 1105 pcibios_resource_to_bus(bus->self, ®ion, res); 1106 *start_phys = hose->io_base_phys + region.start; 1107 *start_virt = (unsigned long) hose->io_base_virt + 1108 region.start; 1109 if (region.end > region.start) 1110 *size = region.end - region.start + 1; 1111 else { 1112 printk("%s(): unexpected region 0x%lx->0x%lx\n", 1113 __FUNCTION__, region.start, region.end); 1114 return 1; 1115 } 1116 1117 } else { 1118 /* Root Bus */ 1119 res = &hose->io_resource; 1120 *start_phys = hose->io_base_phys; 1121 *start_virt = (unsigned long) hose->io_base_virt; 1122 if (res->end > res->start) 1123 *size = res->end - res->start + 1; 1124 else { 1125 printk("%s(): unexpected region 0x%lx->0x%lx\n", 1126 __FUNCTION__, res->start, res->end); 1127 return 1; 1128 } 1129 } 1130 1131 return 0; 1132 } 1133 1134 int unmap_bus_range(struct pci_bus *bus) 1135 { 1136 unsigned long start_phys; 1137 unsigned long start_virt; 1138 unsigned long size; 1139 1140 if (!bus) { 1141 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); 1142 return 1; 1143 } 1144 1145 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1146 return 1; 1147 if (__iounmap_explicit((void __iomem *) start_virt, size)) 1148 return 1; 1149 1150 return 0; 1151 } 1152 EXPORT_SYMBOL(unmap_bus_range); 1153 1154 int remap_bus_range(struct pci_bus *bus) 1155 { 1156 unsigned long start_phys; 1157 unsigned long start_virt; 1158 unsigned long size; 1159 1160 if (!bus) { 1161 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); 1162 return 1; 1163 } 1164 1165 1166 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1167 return 1; 1168 if (start_phys == 0) 1169 return 1; 1170 printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size); 1171 if (__ioremap_explicit(start_phys, start_virt, size, 1172 _PAGE_NO_CACHE | _PAGE_GUARDED)) 1173 return 1; 1174 1175 return 0; 1176 } 1177 EXPORT_SYMBOL(remap_bus_range); 1178 1179 static void phbs_remap_io(void) 1180 { 1181 struct pci_controller *hose, *tmp; 1182 1183 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 1184 remap_bus_range(hose->bus); 1185 } 1186 1187 static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) 1188 { 1189 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1190 unsigned long offset; 1191 1192 if (res->flags & IORESOURCE_IO) { 1193 offset = (unsigned long)hose->io_base_virt - pci_io_base; 1194 1195 res->start += offset; 1196 res->end += offset; 1197 } else if (res->flags & IORESOURCE_MEM) { 1198 res->start += hose->pci_mem_offset; 1199 res->end += hose->pci_mem_offset; 1200 } 1201 } 1202 1203 void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, 1204 struct pci_bus *bus) 1205 { 1206 /* Update device resources. */ 1207 int i; 1208 1209 for (i = 0; i < PCI_NUM_RESOURCES; i++) 1210 if (dev->resource[i].flags) 1211 fixup_resource(&dev->resource[i], dev); 1212 } 1213 EXPORT_SYMBOL(pcibios_fixup_device_resources); 1214 1215 void __devinit pcibios_setup_new_device(struct pci_dev *dev) 1216 { 1217 struct dev_archdata *sd = &dev->dev.archdata; 1218 1219 sd->of_node = pci_device_to_OF_node(dev); 1220 1221 DBG("PCI device %s OF node: %s\n", pci_name(dev), 1222 sd->of_node ? sd->of_node->full_name : "<none>"); 1223 1224 sd->dma_ops = pci_dma_ops; 1225 #ifdef CONFIG_NUMA 1226 sd->numa_node = pcibus_to_node(dev->bus); 1227 #else 1228 sd->numa_node = -1; 1229 #endif 1230 if (ppc_md.pci_dma_dev_setup) 1231 ppc_md.pci_dma_dev_setup(dev); 1232 } 1233 EXPORT_SYMBOL(pcibios_setup_new_device); 1234 1235 static void __devinit do_bus_setup(struct pci_bus *bus) 1236 { 1237 struct pci_dev *dev; 1238 1239 if (ppc_md.pci_dma_bus_setup) 1240 ppc_md.pci_dma_bus_setup(bus); 1241 1242 list_for_each_entry(dev, &bus->devices, bus_list) 1243 pcibios_setup_new_device(dev); 1244 1245 /* Read default IRQs and fixup if necessary */ 1246 list_for_each_entry(dev, &bus->devices, bus_list) { 1247 pci_read_irq_line(dev); 1248 if (ppc_md.pci_irq_fixup) 1249 ppc_md.pci_irq_fixup(dev); 1250 } 1251 } 1252 1253 void __devinit pcibios_fixup_bus(struct pci_bus *bus) 1254 { 1255 struct pci_dev *dev = bus->self; 1256 struct device_node *np; 1257 1258 np = pci_bus_to_OF_node(bus); 1259 1260 DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>"); 1261 1262 if (dev && pci_probe_only && 1263 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { 1264 /* This is a subordinate bridge */ 1265 1266 pci_read_bridge_bases(bus); 1267 pcibios_fixup_device_resources(dev, bus); 1268 } 1269 1270 do_bus_setup(bus); 1271 1272 if (!pci_probe_only) 1273 return; 1274 1275 list_for_each_entry(dev, &bus->devices, bus_list) 1276 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) 1277 pcibios_fixup_device_resources(dev, bus); 1278 } 1279 EXPORT_SYMBOL(pcibios_fixup_bus); 1280 1281 /* 1282 * Reads the interrupt pin to determine if interrupt is use by card. 1283 * If the interrupt is used, then gets the interrupt line from the 1284 * openfirmware and sets it in the pci_dev and pci_config line. 1285 */ 1286 int pci_read_irq_line(struct pci_dev *pci_dev) 1287 { 1288 struct of_irq oirq; 1289 unsigned int virq; 1290 1291 DBG("Try to map irq for %s...\n", pci_name(pci_dev)); 1292 1293 #ifdef DEBUG 1294 memset(&oirq, 0xff, sizeof(oirq)); 1295 #endif 1296 /* Try to get a mapping from the device-tree */ 1297 if (of_irq_map_pci(pci_dev, &oirq)) { 1298 u8 line, pin; 1299 1300 /* If that fails, lets fallback to what is in the config 1301 * space and map that through the default controller. We 1302 * also set the type to level low since that's what PCI 1303 * interrupts are. If your platform does differently, then 1304 * either provide a proper interrupt tree or don't use this 1305 * function. 1306 */ 1307 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) 1308 return -1; 1309 if (pin == 0) 1310 return -1; 1311 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || 1312 line == 0xff) { 1313 return -1; 1314 } 1315 DBG(" -> no map ! Using irq line %d from PCI config\n", line); 1316 1317 virq = irq_create_mapping(NULL, line); 1318 if (virq != NO_IRQ) 1319 set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); 1320 } else { 1321 DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n", 1322 oirq.size, oirq.specifier[0], oirq.specifier[1], 1323 oirq.controller->full_name); 1324 1325 virq = irq_create_of_mapping(oirq.controller, oirq.specifier, 1326 oirq.size); 1327 } 1328 if(virq == NO_IRQ) { 1329 DBG(" -> failed to map !\n"); 1330 return -1; 1331 } 1332 1333 DBG(" -> mapped to linux irq %d\n", virq); 1334 1335 pci_dev->irq = virq; 1336 1337 return 0; 1338 } 1339 EXPORT_SYMBOL(pci_read_irq_line); 1340 1341 void pci_resource_to_user(const struct pci_dev *dev, int bar, 1342 const struct resource *rsrc, 1343 resource_size_t *start, resource_size_t *end) 1344 { 1345 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1346 resource_size_t offset = 0; 1347 1348 if (hose == NULL) 1349 return; 1350 1351 if (rsrc->flags & IORESOURCE_IO) 1352 offset = (unsigned long)hose->io_base_virt - pci_io_base; 1353 1354 /* We pass a fully fixed up address to userland for MMIO instead of 1355 * a BAR value because X is lame and expects to be able to use that 1356 * to pass to /dev/mem ! 1357 * 1358 * That means that we'll have potentially 64 bits values where some 1359 * userland apps only expect 32 (like X itself since it thinks only 1360 * Sparc has 64 bits MMIO) but if we don't do that, we break it on 1361 * 32 bits CHRPs :-( 1362 * 1363 * Hopefully, the sysfs insterface is immune to that gunk. Once X 1364 * has been fixed (and the fix spread enough), we can re-enable the 1365 * 2 lines below and pass down a BAR value to userland. In that case 1366 * we'll also have to re-enable the matching code in 1367 * __pci_mmap_make_offset(). 1368 * 1369 * BenH. 1370 */ 1371 #if 0 1372 else if (rsrc->flags & IORESOURCE_MEM) 1373 offset = hose->pci_mem_offset; 1374 #endif 1375 1376 *start = rsrc->start - offset; 1377 *end = rsrc->end - offset; 1378 } 1379 1380 struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) 1381 { 1382 if (!have_of) 1383 return NULL; 1384 while(node) { 1385 struct pci_controller *hose, *tmp; 1386 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 1387 if (hose->arch_data == node) 1388 return hose; 1389 node = node->parent; 1390 } 1391 return NULL; 1392 } 1393 1394 unsigned long pci_address_to_pio(phys_addr_t address) 1395 { 1396 struct pci_controller *hose, *tmp; 1397 1398 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 1399 if (address >= hose->io_base_phys && 1400 address < (hose->io_base_phys + hose->pci_io_size)) { 1401 unsigned long base = 1402 (unsigned long)hose->io_base_virt - pci_io_base; 1403 return base + (address - hose->io_base_phys); 1404 } 1405 } 1406 return (unsigned int)-1; 1407 } 1408 EXPORT_SYMBOL_GPL(pci_address_to_pio); 1409 1410 1411 #define IOBASE_BRIDGE_NUMBER 0 1412 #define IOBASE_MEMORY 1 1413 #define IOBASE_IO 2 1414 #define IOBASE_ISA_IO 3 1415 #define IOBASE_ISA_MEM 4 1416 1417 long sys_pciconfig_iobase(long which, unsigned long in_bus, 1418 unsigned long in_devfn) 1419 { 1420 struct pci_controller* hose; 1421 struct list_head *ln; 1422 struct pci_bus *bus = NULL; 1423 struct device_node *hose_node; 1424 1425 /* Argh ! Please forgive me for that hack, but that's the 1426 * simplest way to get existing XFree to not lockup on some 1427 * G5 machines... So when something asks for bus 0 io base 1428 * (bus 0 is HT root), we return the AGP one instead. 1429 */ 1430 if (machine_is_compatible("MacRISC4")) 1431 if (in_bus == 0) 1432 in_bus = 0xf0; 1433 1434 /* That syscall isn't quite compatible with PCI domains, but it's 1435 * used on pre-domains setup. We return the first match 1436 */ 1437 1438 for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) { 1439 bus = pci_bus_b(ln); 1440 if (in_bus >= bus->number && in_bus <= bus->subordinate) 1441 break; 1442 bus = NULL; 1443 } 1444 if (bus == NULL || bus->sysdata == NULL) 1445 return -ENODEV; 1446 1447 hose_node = (struct device_node *)bus->sysdata; 1448 hose = PCI_DN(hose_node)->phb; 1449 1450 switch (which) { 1451 case IOBASE_BRIDGE_NUMBER: 1452 return (long)hose->first_busno; 1453 case IOBASE_MEMORY: 1454 return (long)hose->pci_mem_offset; 1455 case IOBASE_IO: 1456 return (long)hose->io_base_phys; 1457 case IOBASE_ISA_IO: 1458 return (long)isa_io_base; 1459 case IOBASE_ISA_MEM: 1460 return -EINVAL; 1461 } 1462 1463 return -EOPNOTSUPP; 1464 } 1465 1466 #ifdef CONFIG_NUMA 1467 int pcibus_to_node(struct pci_bus *bus) 1468 { 1469 struct pci_controller *phb = pci_bus_to_host(bus); 1470 return phb->node; 1471 } 1472 EXPORT_SYMBOL(pcibus_to_node); 1473 #endif 1474