// SPDX-License-Identifier: GPL-2.0
/* pci.c: UltraSparc PCI controller support.
 *
 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
 *
 * OF tree based PCI bus probing taken from the PowerPC port
 * with minor modifications, see there for credits.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>
#include <linux/platform_device.h>

#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>

#include "pci_impl.h"
#include "kernel.h"

/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;

/* Each PBM found gets a unique index. */
int pci_num_pbms = 0;

volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;

static DEFINE_SPINLOCK(pci_poke_lock);

void pci_config_read8(u8 *addr, u8 *ret)
{
	unsigned long flags;
	u8 byte;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduba [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (byte)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = byte;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_read16(u16 *addr, u16 *ret)
{
	unsigned long flags;
	u16 word;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (word)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = word;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_read32(u32 *addr, u32 *ret)
{
	unsigned long flags;
	u32 dword;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduwa [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (dword)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	if (!pci_poke_faulted)
		*ret = dword;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

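/* The config-space write helpers below mirror the read helpers above:
 * the store goes out through ASI_PHYS_BYPASS_EC_E_L bracketed by
 * membar #Sync, with pci_poke_in_progress/pci_poke_cpu marking the
 * access so a fault taken on the probe can be identified.  Unlike the
 * reads there is no result to suppress, so pci_poke_faulted is cleared
 * but not checked afterwards.
 */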
void pci_config_write8(u8 *addr, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stba %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write16(u16 *addr, u16 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stha %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

void pci_config_write32(u32 *addr, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&pci_poke_lock, flags);
	pci_poke_cpu = smp_processor_id();
	pci_poke_in_progress = 1;
	pci_poke_faulted = 0;
	__asm__ __volatile__("membar #Sync\n\t"
			     "stwa %0, [%1] %2\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	pci_poke_in_progress = 0;
	pci_poke_cpu = -1;
	spin_unlock_irqrestore(&pci_poke_lock, flags);
}

static int ofpci_verbose;

static int __init ofpci_debug(char *str)
{
	int val = 0;

	get_option(&str, &val);
	if (val)
		ofpci_verbose = 1;
	return 1;
}

__setup("ofpci_debug=", ofpci_debug);

static unsigned long pci_parse_of_flags(u32 addr0)
{
	unsigned long flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x01000000)
			flags |= IORESOURCE_MEM_64
				| PCI_BASE_ADDRESS_MEM_TYPE_64;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				| PCI_BASE_ADDRESS_MEM_PREFETCH;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return flags;
}

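/* Example of the decoding above: an assigned-addresses phys.hi cell of
 * 0x82000010 selects 32-bit, non-prefetchable memory space at config
 * register 0x10 (BAR 0), so pci_parse_of_flags() returns IORESOURCE_MEM |
 * PCI_BASE_ADDRESS_SPACE_MEMORY and pci_parse_of_addrs() below files the
 * corresponding range under dev->resource[0].
 */
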
/* The of_device layer has translated all of the assigned-address properties
 * into physical address resources, we only have to figure out the register
 * mapping.
 */
static void pci_parse_of_addrs(struct platform_device *op,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct resource *op_res;
	const u32 *addrs;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	if (ofpci_verbose)
		pci_info(dev, " parse addresses (%d bytes) @ %p\n",
			 proplen, addrs);
	op_res = &op->resource[0];
	for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
		struct resource *res;
		unsigned long flags;
		int i;

		flags = pci_parse_of_flags(addrs[0]);
		if (!flags)
			continue;
		i = addrs[0] & 0xff;
		if (ofpci_verbose)
			pci_info(dev, " start: %llx, end: %llx, i: %x\n",
				 op_res->start, op_res->end, i);

		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		} else {
			pci_err(dev, "bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->start = op_res->start;
		res->end = op_res->end;
		res->flags = flags;
		res->name = pci_name(dev);

		pci_info(dev, "reg 0x%x: %pR\n", i, res);
	}
}

static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
				  void *stc, void *host_controller,
				  struct platform_device *op,
				  int numa_node)
{
	sd->iommu = iommu;
	sd->stc = stc;
	sd->host_controller = host_controller;
	sd->op = op;
	sd->numa_node = numa_node;
}

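/* Build a pci_dev for one OF node.  Vendor/device identity, header type
 * and the interrupt come from firmware properties; the class code is the
 * notable exception and is read back from config space, as the comment
 * further down explains.
 */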
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
					 struct device_node *node,
					 struct pci_bus *bus, int devfn)
{
	struct dev_archdata *sd;
	struct platform_device *op;
	struct pci_dev *dev;
	u32 class;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	op = of_find_device_by_node(node);
	sd = &dev->dev.archdata;
	pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
			      pbm->numa_node);
	sd = &op->dev.archdata;
	sd->iommu = pbm->iommu;
	sd->stc = &pbm->stc;
	sd->numa_node = pbm->numa_node;

	if (of_node_name_eq(node, "ebus"))
		of_propagate_archdata(op);

	if (ofpci_verbose)
		pci_info(bus, " create device, devfn: %x, type: %s\n",
			 devfn, of_node_get_device_type(node));

	dev->sysdata = node;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->dev.of_node = of_node_get(node);
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
	dev->device = of_getintprop_default(node, "device-id", 0xffff);
	dev->subsystem_vendor =
		of_getintprop_default(node, "subsystem-vendor-id", 0);
	dev->subsystem_device =
		of_getintprop_default(node, "subsystem-id", 0);

	dev->cfg_size = pci_cfg_space_size(dev);

	/* We can't actually use the firmware value, we have
	 * to read what is in the register right now.  One
	 * reason is that in the case of IDE interfaces the
	 * firmware can sample the value before the IDE
	 * interface is programmed into native mode.
	 */
	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->class = class >> 8;
	dev->revision = class & 0xff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		     dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));

	/* I have seen IDE devices which will not respond to
	 * the bmdma simplex check reads if bus mastering is
	 * disabled.
	 */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
		pci_set_master(dev);

	dev->current_state = PCI_UNKNOWN;	/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	if (of_node_name_eq(node, "pci")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
	} else if (of_node_is_type(node, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;

		dev->irq = sd->op->archdata.irqs[0];
		if (dev->irq == 0xffffffff)
			dev->irq = PCI_IRQ_NONE;
	}

	pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
		 dev->vendor, dev->device, dev->hdr_type, dev->class);

	pci_parse_of_addrs(sd->op, node, dev);

	if (ofpci_verbose)
		pci_info(dev, " adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}

static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
{
	u32 idx, first, last;

	first = 8;
	last = 0;
	for (idx = 0; idx < 8; idx++) {
		if ((map & (1 << idx)) != 0) {
			if (first > idx)
				first = idx;
			if (last < idx)
				last = idx;
		}
	}

	*first_p = first;
	*last_p = last;
}

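/* Example: an APB I/O map byte of 0x0c (bits 2 and 3 set) yields
 * first = 2, last = 3, which apb_fake_ranges() below turns into the
 * 2MB-per-bit bus window 0x400000-0x7fffff before converting it into
 * a resource.
 */
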
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
 * a proper 'ranges' property.
 */
static void apb_fake_ranges(struct pci_dev *dev,
			    struct pci_bus *bus,
			    struct pci_pbm_info *pbm)
{
	struct pci_bus_region region;
	struct resource *res;
	u32 first, last;
	u8 map;

	pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
	apb_calc_first_last(map, &first, &last);
	res = bus->resource[0];
	res->flags = IORESOURCE_IO;
	region.start = (first << 21);
	region.end = (last << 21) + ((1 << 21) - 1);
	pcibios_bus_to_resource(dev->bus, res, &region);

	pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
	apb_calc_first_last(map, &first, &last);
	res = bus->resource[1];
	res->flags = IORESOURCE_MEM;
	region.start = (first << 29);
	region.end = (last << 29) + ((1 << 29) - 1);
	pcibios_bus_to_resource(dev->bus, res, &region);
}

static void pci_of_scan_bus(struct pci_pbm_info *pbm,
			    struct device_node *node,
			    struct pci_bus *bus);

#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])

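/* Create and fill in the pci_bus behind the PCI-PCI bridge described by
 * 'node': bus numbers come from the "bus-range" property, while the
 * bridge windows come from "ranges", from apb_fake_ranges() for Simba
 * bridges that lack it, or from the bridge registers when neither is
 * available.
 */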
static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
			       struct device_node *node,
			       struct pci_dev *dev)
{
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, simba;
	struct pci_bus_region region;
	struct resource *res;
	unsigned int flags;
	u64 size;

	if (ofpci_verbose)
		pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n",
			 node);
		return;
	}

	if (ofpci_verbose)
		pci_info(dev, " Bridge bus range [%u --> %u]\n",
			 busrange[0], busrange[1]);

	ranges = of_get_property(node, "ranges", &len);
	simba = 0;
	if (ranges == NULL) {
		const char *model = of_get_property(node, "model", NULL);

		if (model && !strcmp(model, "SUNW,simba"))
			simba = 1;
	}

	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		pci_err(dev, "Failed to create pci bus for %pOF\n",
			node);
		return;
	}

	bus->primary = dev->bus->number;
	pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
	bus->bridge_ctl = 0;

	if (ofpci_verbose)
		pci_info(dev, " Bridge ranges[%p] simba[%d]\n",
			 ranges, simba);

	/* parse ranges property, or cook one up by hand for Simba */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	if (simba) {
		apb_fake_ranges(dev, bus, pbm);
		goto after_ranges;
	} else if (ranges == NULL) {
		pci_read_bridge_bases(bus);
		goto after_ranges;
	}
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		u64 start;

		if (ofpci_verbose)
			pci_info(dev, " RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
				 "%08x:%08x]\n",
				 ranges[0], ranges[1], ranges[2], ranges[3],
				 ranges[4], ranges[5], ranges[6], ranges[7]);

		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;

		/* On PCI-Express systems, PCI bridges that have no devices
		 * downstream have a bogus size value where the first 32-bit
		 * cell is 0xffffffff.  This results in a bogus range where
		 * start + size overflows.
		 *
		 * Just skip these; otherwise the kernel will complain when
		 * it tries to claim the resource.
		 */
		if (size >> 32 == 0xffffffff)
			continue;

		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				pci_err(dev, "ignoring extra I/O range"
					" for bridge %pOF\n", node);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				pci_err(dev, "too many memory ranges"
					" for bridge %pOF\n", node);
				continue;
			}
			res = bus->resource[i];
			++i;
		}

		res->flags = flags;
		region.start = start = GET_64BIT(ranges, 1);
		region.end = region.start + size - 1;

		if (ofpci_verbose)
			pci_info(dev, " Using flags[%08x] start[%016llx] size[%016llx]\n",
				 flags, start, size);

		pcibios_bus_to_resource(dev->bus, res, &region);
	}
after_ranges:
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	if (ofpci_verbose)
		pci_info(dev, " bus name: %s\n", bus->name);

	pci_of_scan_bus(pbm, node, bus);
}

static void pci_of_scan_bus(struct pci_pbm_info *pbm,
			    struct device_node *node,
			    struct pci_bus *bus)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn, prev_devfn;
	struct pci_dev *dev;

	if (ofpci_verbose)
		pci_info(bus, "scan_bus[%pOF] bus no %d\n",
			 node, bus->number);

	prev_devfn = -1;
	for_each_child_of_node(node, child) {
		if (ofpci_verbose)
			pci_info(bus, " * %pOF\n", child);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;

		devfn = (reg[0] >> 8) & 0xff;

		/* This is a workaround for some device trees
		 * which list PCI devices twice.  On the V100
		 * for example, device number 3 is listed twice.
		 * Once as "pm" and once again as "lomp".
		 */
		if (devfn == prev_devfn)
			continue;
		prev_devfn = devfn;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(pbm, child, bus, devfn);
		if (!dev)
			continue;
		if (ofpci_verbose)
			pci_info(dev, "dev header type: %x\n", dev->hdr_type);

		if (pci_is_bridge(dev))
			of_scan_pci_bridge(pbm, child, dev);
	}
}

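/* Per-device sysfs attribute "obppath" exposing the full OF device path;
 * it is registered below for every device found under a PBM.
 */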
static ssize_t
show_pciobppath_attr(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct pci_dev *pdev;
	struct device_node *dp;

	pdev = to_pci_dev(dev);
	dp = pdev->dev.of_node;

	return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}

static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);

static void pci_bus_register_of_sysfs(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;
	int err;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* we don't really care if we can create this file or
		 * not, but we need to assign the result of the call
		 * or the world will fall under alien invasion and
		 * everybody will be frozen on a spaceship ready to be
		 * eaten on alpha centauri by some green and jelly
		 * humanoid.
		 */
		err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
		(void) err;
	}
	list_for_each_entry(child_bus, &bus->children, node)
		pci_bus_register_of_sysfs(child_bus);
}

static void pci_claim_legacy_resources(struct pci_dev *dev)
{
	struct pci_bus_region region;
	struct resource *p, *root, *conflict;

	if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
		return;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return;

	p->name = "Video RAM area";
	p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	region.start = 0xa0000UL;
	region.end = region.start + 0x1ffffUL;
	pcibios_bus_to_resource(dev->bus, p, &region);

	root = pci_find_parent_resource(dev, p);
	if (!root) {
		pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
		goto err;
	}

	conflict = request_resource_conflict(root, p);
	if (conflict) {
		pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
			 p, conflict->name, conflict);
		goto err;
	}

	pci_info(dev, "VGA legacy framebuffer %pR\n", p);
	return;

err:
	kfree(p);
}

static void pci_claim_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *child_bus;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct resource *r;
		int i;

		pci_dev_for_each_resource(dev, r, i) {
			if (r->parent || !r->start || !r->flags)
				continue;

			if (ofpci_verbose)
				pci_info(dev, "Claiming Resource %d: %pR\n",
					 i, r);

			pci_claim_resource(dev, i);
		}

		pci_claim_legacy_resources(dev);
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pci_claim_bus_resources(child_bus);
}

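/* Scan one PBM (host bridge): register its I/O, MEM and optional MEM64
 * apertures plus the bus number range, create the root bus, walk the OF
 * tree to populate it, and claim the resources firmware has already
 * assigned.
 */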
struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
				 struct device *parent)
{
	LIST_HEAD(resources);
	struct device_node *node = pbm->op->dev.of_node;
	struct pci_bus *bus;

	printk("PCI: Scanning PBM %pOF\n", node);

	pci_add_resource_offset(&resources, &pbm->io_space,
				pbm->io_offset);
	pci_add_resource_offset(&resources, &pbm->mem_space,
				pbm->mem_offset);
	if (pbm->mem64_space.flags)
		pci_add_resource_offset(&resources, &pbm->mem64_space,
					pbm->mem64_offset);
	pbm->busn.start = pbm->pci_first_busno;
	pbm->busn.end	= pbm->pci_last_busno;
	pbm->busn.flags	= IORESOURCE_BUS;
	pci_add_resource(&resources, &pbm->busn);
	bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
				  pbm, &resources);
	if (!bus) {
		printk(KERN_ERR "Failed to create bus for %pOF\n", node);
		pci_free_resource_list(&resources);
		return NULL;
	}

	pci_of_scan_bus(pbm, node, bus);
	pci_bus_register_of_sysfs(bus);

	pci_claim_bus_resources(bus);

	pci_bus_add_devices(bus);
	return bus;
}

/* Platform support for /proc/bus/pci/X/Y mmap()s. */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	if (!pbm)
		return -EINVAL;

	vma->vm_pgoff += (ioaddr + pbm->io_space.start) >> PAGE_SHIFT;

	return 0;
}

#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	return pbm->numa_node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif

/* Return the domain number for this pci bus */

int pci_domain_nr(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;
	int ret;

	if (!pbm) {
		ret = -ENXIO;
	} else {
		ret = pbm->index;
	}

	return ret;
}
EXPORT_SYMBOL(pci_domain_nr);

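/* MSI setup and teardown are delegated to the controller-specific code
 * via the setup_msi_irq/teardown_msi_irq hooks in struct pci_pbm_info.
 */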
#ifdef CONFIG_PCI_MSI
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned int irq;

	if (!pbm->setup_msi_irq)
		return -EINVAL;

	return pbm->setup_msi_irq(&irq, pdev, desc);
}

void arch_teardown_msi_irq(unsigned int irq)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);
	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;

	if (pbm->teardown_msi_irq)
		pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */

/* ALI sound chips generate 31-bits of DMA, a special register
 * determines what bit 31 is emitted as.
 */
int ali_sound_dma_hack(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	struct pci_dev *ali_isa_bridge;
	u8 val;

	if (!dev_is_pci(dev))
		return 0;

	if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL ||
	    to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 ||
	    device_mask != 0x7fffffff)
		return 0;

	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (iommu->dma_addr_mask & 0x80000000)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
	return 1;
}

void pci_resource_to_user(const struct pci_dev *pdev, int bar,
			  const struct resource *rp, resource_size_t *start,
			  resource_size_t *end)
{
	struct pci_bus_region region;

	/*
	 * "User" addresses are shown in /sys/devices/pci.../.../resource
	 * and /proc/bus/pci/devices and used as mmap offsets for
	 * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
	 *
	 * On sparc, these are PCI bus addresses, i.e., raw BAR values.
	 */
	pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp);
	*start = region.start;
	*end = region.end;
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

#ifdef CONFIG_PCI_IOV
int pcibios_device_add(struct pci_dev *dev)
{
	struct pci_dev *pdev;

	/* Add sriov arch specific initialization here.
	 * Copy dev_archdata from PF to VF
	 */
	if (dev->is_virtfn) {
		struct dev_archdata *psd;

		pdev = dev->physfn;
		psd = &pdev->dev.archdata;
		pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
				      psd->stc, psd->host_controller, NULL,
				      psd->numa_node);
	}
	return 0;
}
#endif /* CONFIG_PCI_IOV */

static int __init pcibios_init(void)
{
	pci_dfl_cache_line_size = 64 >> 2;
	return 0;
}
subsys_initcall(pcibios_init);

#ifdef CONFIG_SYSFS

#define SLOT_NAME_SIZE	11	/* Max decimal digits + null in u32 */

static void pcie_bus_slot_names(struct pci_bus *pbus)
{
	struct pci_dev *pdev;
	struct pci_bus *bus;

	list_for_each_entry(pdev, &pbus->devices, bus_list) {
		char name[SLOT_NAME_SIZE];
		struct pci_slot *pci_slot;
		const u32 *slot_num;
		int len;

		slot_num = of_get_property(pdev->dev.of_node,
					   "physical-slot#", &len);

		if (slot_num == NULL || len != 4)
			continue;

		snprintf(name, sizeof(name), "%u", slot_num[0]);
		pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);

		if (IS_ERR(pci_slot))
			pr_err("PCI: pci_create_slot returned %ld.\n",
			       PTR_ERR(pci_slot));
	}

	list_for_each_entry(bus, &pbus->children, node)
		pcie_bus_slot_names(bus);
}

static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
{
	const struct pci_slot_names {
		u32	slot_mask;
		char	names[];
	} *prop;
	const char *sp;
	int len, i;
	u32 mask;

	prop = of_get_property(node, "slot-names", &len);
	if (!prop)
		return;

	mask = prop->slot_mask;
	sp = prop->names;

	if (ofpci_verbose)
		pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n",
			 node, mask);

	i = 0;
	while (mask) {
		struct pci_slot *pci_slot;
		u32 this_bit = 1 << i;

		if (!(mask & this_bit)) {
			i++;
			continue;
		}

		if (ofpci_verbose)
			pci_info(bus, "Making slot [%s]\n", sp);

		pci_slot = pci_create_slot(bus, i, sp, NULL);
		if (IS_ERR(pci_slot))
			pci_err(bus, "pci_create_slot returned %ld\n",
				PTR_ERR(pci_slot));

		sp += strlen(sp) + 1;
		mask &= ~this_bit;
		i++;
	}
}

static int __init of_pci_slot_init(void)
{
	struct pci_bus *pbus = NULL;

	while ((pbus = pci_find_next_bus(pbus)) != NULL) {
		struct device_node *node;
		struct pci_dev *pdev;

		pdev = list_first_entry(&pbus->devices, struct pci_dev,
					bus_list);

		if (pdev && pci_is_pcie(pdev)) {
			pcie_bus_slot_names(pbus);
		} else {

			if (pbus->self) {

				/* PCI->PCI bridge */
				node = pbus->self->dev.of_node;

			} else {
				struct pci_pbm_info *pbm = pbus->sysdata;

				/* Host PCI controller */
				node = pbm->op->dev.of_node;
			}

			pci_bus_slot_names(node, pbus);
		}
	}

	return 0;
}
device_initcall(of_pci_slot_init);
#endif