/*
 * MPC83xx/85xx/86xx PCI/PCIE support routines.
 *
 * Copyright 2007-2012 Freescale Semiconductor, Inc.
 * Copyright 2008-2009 MontaVista Software, Inc.
 *
 * Initial author: Xianghua Xiao <x.xiao@freescale.com>
 * Recode: ZHANG WEI <wei.zhang@freescale.com>
 * Rewrite the routines for Freescale PCI and PCI Express
 *	Roy Zang <tie-fei.zang@freescale.com>
 * MPC83xx PCI-Express support:
 *	Tony Li <tony.li@freescale.com>
 *	Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/fsl/edac.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
#include <asm/mpc85xx.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>

static int fsl_pcie_bus_fixup, is_mpc83xx_pci;

static void quirk_fsl_pcie_early(struct pci_dev *dev)
{
	u8 hdr_type;

	/* if we aren't a PCIe don't bother */
	if (!pci_is_pcie(dev))
		return;

	/* if we aren't in host mode don't bother */
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
	if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
		return;

	/*
	 * The root complex reports a non-bridge class code; force the bridge
	 * class so the PCI core treats it as a PCI-to-PCI bridge.
	 */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
	fsl_pcie_bus_fixup = 1;
	return;
}

static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
				    int, int, u32 *);

static int fsl_pcie_check_link(struct pci_controller *hose)
{
	u32 val = 0;

	if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
		if (hose->ops->read == fsl_indirect_read_config)
			__indirect_read_config(hose, hose->first_busno, 0,
					       PCIE_LTSSM, 4, &val);
		else
			early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
		if (val < PCIE_LTSSM_L0)
			return 1;
	} else {
		struct ccsr_pci __iomem *pci = hose->private_data;
		/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
		val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
				>> PEX_CSR0_LTSSM_SHIFT;
		if (val != PEX_CSR0_LTSSM_L0)
			return 1;
	}

	return 0;
}

static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (fsl_pcie_check_link(hose))
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	else
		hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return indirect_read_config(bus, devfn, offset, len, val);
}

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)

static struct pci_ops fsl_indirect_pcie_ops =
{
	.read = fsl_indirect_read_config,
	.write = indirect_write_config,
};

#define MAX_PHYS_ADDR_BITS	40
static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;

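/*
 * Illustration of the 64-bit DMA scheme (hypothetical numbers): a second
 * inbound ATMU window is programmed at PCI address 1ull << 40 that maps back
 * to physical address 0, and fsl_pci_dma_set_mask() gives any PCI device
 * whose DMA mask covers at least 40 bits a direct mapping with this offset.
 * Such a device then reaches, for example, physical 0x20000000 at bus
 * address 0x10020000000 and never needs SWIOTLB bounce buffering.  See
 * setup_pci_atmu() below.
 */
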
#ifdef CONFIG_SWIOTLB
static void setup_swiotlb_ops(struct pci_controller *hose)
{
	if (ppc_swiotlb_enable) {
		hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
		set_pci_dma_ops(&swiotlb_dma_ops);
	}
}
#else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
#endif

static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	/*
	 * Fixup PCI devices that are able to DMA to above the physical
	 * address width of the SoC such that we can address any internal
	 * SoC address from across PCI if needed
	 */
	if ((dev_is_pci(dev)) &&
	    dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
		set_dma_ops(dev, &dma_direct_ops);
		set_dma_offset(dev, pci64_dma_offset);
	}

	*dev->dma_mask = dma_mask;
	return 0;
}

static int setup_one_atmu(struct ccsr_pci __iomem *pci,
			  unsigned int index, const struct resource *res,
			  resource_size_t offset)
{
	resource_size_t pci_addr = res->start - offset;
	resource_size_t phys_addr = res->start;
	resource_size_t size = resource_size(res);
	u32 flags = 0x80044000; /* enable & mem R/W */
	unsigned int i;

	pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
		 (u64)res->start, (u64)size);

	if (res->flags & IORESOURCE_PREFETCH)
		flags |= 0x10000000; /* enable relaxed ordering */

	for (i = 0; size > 0; i++) {
		unsigned int bits = min_t(u32, ilog2(size),
					  __ffs(pci_addr | phys_addr));

		if (index + i >= 5)
			return -1;

		out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
		out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
		out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
		out_be32(&pci->pow[index + i].powar, flags | (bits - 1));

		pci_addr += (resource_size_t)1U << bits;
		phys_addr += (resource_size_t)1U << bits;
		size -= (resource_size_t)1U << bits;
	}

	return i;
}

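/*
 * Worked example for setup_one_atmu() above (hypothetical numbers): a MEM
 * resource of size 0x30000000 at PCI/physical address 0xc0000000 is split
 * into naturally aligned power-of-two windows.  Pass 1: bits =
 * min(ilog2(0x30000000), __ffs(0xc0000000)) = min(29, 30) = 29, so a 512 MiB
 * window is programmed at 0xc0000000 (the POWAR size field is bits - 1).
 * Pass 2 covers the remaining 256 MiB at 0xe0000000.  The function returns
 * the number of outbound windows used, or -1 if it would run past the last
 * available window.
 */
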
static bool is_kdump(void)
{
	struct device_node *node;

	node = of_find_node_by_type(NULL, "memory");
	if (!node) {
		WARN_ON_ONCE(1);
		return false;
	}

	return of_property_read_bool(node, "linux,usable-memory");
}

/* atmu setup for fsl pci/pcie controller */
static void setup_pci_atmu(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
	u64 mem, sz, paddr_hi = 0;
	u64 offset = 0, paddr_lo = ULLONG_MAX;
	u32 pcicsrbar = 0, pcicsrbar_sz;
	u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
			PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
	const char *name = hose->dn->full_name;
	const u64 *reg;
	int len;
	bool setup_inbound;

	/*
	 * If this is kdump, we don't want to trigger a bunch of PCI
	 * errors by closing the window on in-flight DMA.
	 *
	 * We still run most of the function's logic so that things like
	 * hose->dma_window_size still get set.
	 */
	setup_inbound = !is_kdump();

	if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
		/*
		 * BSC9132 Rev1.0 has an issue where all the PEX inbound
		 * windows have implemented the default target value as 0xf
		 * for CCSR space.  In all Freescale legacy devices the target
		 * of 0xf is reserved for local memory space.  9132 Rev1.0
		 * now has local memory space mapped to target 0x0 instead of
		 * 0xf.  Hence add a workaround to remove the target 0xf
		 * defined for memory space from the inbound window attributes.
		 */
		piwar &= ~PIWAR_TGI_LOCAL;
	}

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
			win_idx = 2;
			start_idx = 0;
			end_idx = 3;
		}
	}

	/* Disable all windows (except powar0 since it's ignored) */
	for (i = 1; i < 5; i++)
		out_be32(&pci->pow[i].powar, 0);

	if (setup_inbound) {
		for (i = start_idx; i < end_idx; i++)
			out_be32(&pci->piw[i].piwar, 0);
	}

	/* Setup outbound MEM window */
	for (i = 0, j = 1; i < 3; i++) {
		if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
			continue;

		paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
		paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);

		/* We assume all memory resources have the same offset */
		offset = hose->mem_offset[i];
		n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);

		if (n < 0 || j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
			hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
		} else
			j += n;
	}

	/* Setup outbound IO window */
	if (hose->io_resource.flags & IORESOURCE_IO) {
		if (j >= 5) {
			pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
		} else {
			pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
				 "phy base 0x%016llx.\n",
				 (u64)hose->io_resource.start,
				 (u64)resource_size(&hose->io_resource),
				 (u64)hose->io_base_phys);
			out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
			out_be32(&pci->pow[j].potear, 0);
			out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
			/* Enable, IO R/W */
			out_be32(&pci->pow[j].powar, 0x80088000
				| (ilog2(hose->io_resource.end
				- hose->io_resource.start + 1) - 1));
		}
	}

	/* convert to pci address space */
	paddr_hi -= offset;
	paddr_lo -= offset;

	if (paddr_hi == paddr_lo) {
		pr_err("%s: No outbound window space\n", name);
		return;
	}

	if (paddr_lo == 0) {
		pr_err("%s: No space for inbound window\n", name);
		return;
	}

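	/*
	 * Sketch of the PCSRBAR/PEXCSRBAR sizing below (illustrative values):
	 * writing 0xffffffff to BAR0 of the controller's own config header
	 * and reading it back yields the size mask; for a 1 MiB CCSR BAR the
	 * read-back would be roughly 0xfff00000, so pcicsrbar_sz becomes
	 * ~0xfff00000 + 1 = 0x100000.  The BAR is then placed either just
	 * below 4 GiB or just below the lowest outbound window (paddr_lo),
	 * aligned down to its size.
	 */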
	/* setup PCSRBAR/PEXCSRBAR */
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
	early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
	pcicsrbar_sz = ~pcicsrbar_sz + 1;

	if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
	    (paddr_lo > 0x100000000ull))
		pcicsrbar = 0x100000000ull - pcicsrbar_sz;
	else
		pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
	early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);

	paddr_lo = min(paddr_lo, (u64)pcicsrbar);

	pr_info("%s: PCICSRBAR @ 0x%x\n", name, pcicsrbar);

	/* Setup inbound mem window */
	mem = memblock_end_of_DRAM();
	pr_info("%s: end of DRAM %llx\n", __func__, mem);

	/*
	 * The msi-address-64 property, if it exists, indicates the physical
	 * address of the MSIIR register.  Normally, this register is located
	 * inside CCSR, so the ATMU that covers all of CCSR is used.  But if
	 * this property exists, then we normally need to create a new ATMU
	 * for it.  For now, however, we cheat.  The only entity that creates
	 * this property is the Freescale hypervisor, and the address is
	 * specified in the partition configuration.  Typically, the address
	 * is located in the page immediately after the end of DDR.  If so, we
	 * can avoid allocating a new ATMU by extending the DDR ATMU by one
	 * page.
	 */
	reg = of_get_property(hose->dn, "msi-address-64", &len);
	if (reg && (len == sizeof(u64))) {
		u64 address = be64_to_cpup(reg);

		if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
			pr_info("%s: extending DDR ATMU to cover MSIIR\n", name);
			mem += PAGE_SIZE;
		} else {
			/* TODO: Create a new ATMU for MSIIR */
			pr_warn("%s: msi-address-64 address of %llx is "
				"unsupported\n", name, address);
		}
	}

	sz = min(mem, paddr_lo);
	mem_log = ilog2(sz);

	/* PCIe can overmap inbound & outbound since RX & TX are separated */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* Size window to exact size if power-of-two or one size up */
		if ((1ull << mem_log) != mem) {
			mem_log++;
			if ((1ull << mem_log) > mem)
				pr_info("%s: Setting PCI inbound window "
					"greater than memory size\n", name);
		}

		piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
			out_be32(&pci->piw[win_idx].piwar, piwar);
		}

		win_idx--;
		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)sz;

		/*
		 * if we have >4G of memory setup second PCI inbound window to
		 * let devices that are 64-bit address capable to work w/o
		 * SWIOTLB and access the full range of memory
		 */
		if (sz != mem) {
			mem_log = ilog2(mem);

			/* Size window up if we don't fit in exact power-of-2 */
			if ((1ull << mem_log) != mem)
				mem_log++;

			piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);

			if (setup_inbound) {
				/* Setup inbound memory window */
				out_be32(&pci->piw[win_idx].pitar, 0x00000000);
				out_be32(&pci->piw[win_idx].piwbear,
					 pci64_dma_offset >> 44);
				out_be32(&pci->piw[win_idx].piwbar,
					 pci64_dma_offset >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			/*
			 * install our own dma_set_mask handler to fixup dma_ops
			 * and dma_offset
			 */
			ppc_md.dma_set_mask = fsl_pci_dma_set_mask;

			pr_info("%s: Setup 64-bit PCI DMA window\n", name);
		}
	} else {
		u64 paddr = 0;

		if (setup_inbound) {
			/* Setup inbound memory window */
			out_be32(&pci->piw[win_idx].pitar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
			out_be32(&pci->piw[win_idx].piwar,
				 (piwar | (mem_log - 1)));
		}

		win_idx--;
		paddr += 1ull << mem_log;
		sz -= 1ull << mem_log;

		if (sz) {
			mem_log = ilog2(sz);
			piwar |= (mem_log - 1);

			if (setup_inbound) {
				out_be32(&pci->piw[win_idx].pitar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwbar,
					 paddr >> 12);
				out_be32(&pci->piw[win_idx].piwar, piwar);
			}

			win_idx--;
			paddr += 1ull << mem_log;
		}

		hose->dma_window_base_cur = 0x00000000;
		hose->dma_window_size = (resource_size_t)paddr;
	}

	if (hose->dma_window_size < mem) {
#ifdef CONFIG_SWIOTLB
		ppc_swiotlb_enable = 1;
#else
		pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to "
			"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
			name);
#endif
		/* adjusting outbound windows could reclaim space in mem map */
		if (paddr_hi < 0xffffffffull)
			pr_warning("%s: WARNING: Outbound window cfg leaves "
				"gaps in memory map. Adjusting the memory map "
				"could reduce unnecessary bounce buffering.\n",
				name);

		pr_info("%s: DMA window size is 0x%llx\n", name,
			(u64)hose->dma_window_size);
	}
}

static void __init setup_pci_cmd(struct pci_controller *hose)
{
	u16 cmd;
	int cap_x;

	early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
		| PCI_COMMAND_IO;
	early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);

	cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
	if (cap_x) {
		int pci_x_cmd = cap_x + PCI_X_CMD;
		cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
			| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
		early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
	} else {
		early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
	}
}

void fsl_pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	int i, is_pcie = 0, no_link;

	/* The root complex bridge comes up with bogus resources,
	 * we copy the PHB ones in.
	 *
	 * With the current generic PCI code, the PHB bus no longer
	 * has bus->resource[0..4] set, so things are a bit more
	 * tricky.
	 */

	if (fsl_pcie_bus_fixup)
		is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
	no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);

	if (bus->parent == hose->bus && (is_pcie || no_link)) {
		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
			struct resource *res = bus->resource[i];
			struct resource *par;

			if (!res)
				continue;
			if (i == 0)
				par = &hose->io_resource;
			else if (i < 4)
				par = &hose->mem_resources[i-1];
			else
				par = NULL;

			res->start = par ? par->start : 0;
			res->end = par ? par->end : 0;
			res->flags = par ? par->flags : 0;
		}
	}
}

int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	const int *bus_range;
	u8 hdr_type, progif;
	struct device_node *dev;
	struct ccsr_pci __iomem *pci;
	u16 temp;
	u32 svr = mfspr(SPRN_SVR);

	dev = pdev->dev.of_node;

	if (!of_device_is_available(dev)) {
		pr_warning("%s: disabled\n", dev->full_name);
		return -ENODEV;
	}

	pr_debug("Adding PCI host bridge %s\n", dev->full_name);

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc)) {
		printk(KERN_WARNING "Can't get pci register base!\n");
		return -ENOMEM;
	}

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int))
		printk(KERN_WARNING "Can't get bus-range for %s, assume"
			" bus 0\n", dev->full_name);

	pci_add_flags(PCI_REASSIGN_ALL_BUS);
	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	/* set platform device as the parent */
	hose->parent = &pdev->dev;
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
		 (u64)rsrc.start, (u64)resource_size(&rsrc));

	pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
	if (!hose->private_data)
		goto no_bridge;

	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
			   PPC_INDIRECT_TYPE_BIG_ENDIAN);

	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		/* use fsl_indirect_read_config for PCIe */
		hose->ops = &fsl_indirect_pcie_ops;
		/* For PCIE read HEADER_TYPE to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
			goto no_bridge;

	} else {
		/* For PCI read PROG to identify controller mode */
		early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
		if ((progif & 1) &&
		    !of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
			goto no_bridge;
	}

	setup_pci_cmd(hose);

	/* check PCI express link status */
	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
			PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
		if (fsl_pcie_check_link(hose))
			hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
	} else {
		/*
		 * Set PBFR(PCI Bus Function Register)[10] = 1 to
		 * disable the combining of crossing cacheline
		 * boundary requests into one burst transaction.
		 * PCI-X operation is not affected.
		 * Fix erratum PCI 5 on MPC8548
		 */
#define PCI_BUS_FUNCTION 0x44
#define PCI_BUS_FUNCTION_MDS 0x400	/* Master disable streaming */
		if (((SVR_SOC_VER(svr) == SVR_8543) ||
		     (SVR_SOC_VER(svr) == SVR_8545) ||
		     (SVR_SOC_VER(svr) == SVR_8547) ||
		     (SVR_SOC_VER(svr) == SVR_8548)) &&
		    !early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
			early_read_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, &temp);
			temp |= PCI_BUS_FUNCTION_MDS;
			early_write_config_word(hose, 0, 0,
					PCI_BUS_FUNCTION, temp);
		}
	}

	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
		"Firmware bus number: %d->%d\n",
		(unsigned long long)rsrc.start, hose->first_busno,
		hose->last_busno);

	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
		hose, hose->cfg_addr, hose->cfg_data);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, is_primary);

	/* Setup PEX window registers */
	setup_pci_atmu(hose);

	/* Set up controller operations */
	setup_swiotlb_ops(hose);

	return 0;

no_bridge:
	iounmap(hose->private_data);
	/* unmap cfg_data & cfg_addr separately if not on same page */
	if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
	    ((unsigned long)hose->cfg_addr & PAGE_MASK))
		iounmap(hose->cfg_data);
	iounmap(hose->cfg_addr);
	pcibios_free_controller(hose);
	return -ENODEV;
}
#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */

DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
			quirk_fsl_pcie_early);

#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
struct mpc83xx_pcie_priv {
	void __iomem *cfg_type0;
	void __iomem *cfg_type1;
	u32 dev_base;
};

struct pex_inbound_window {
	u32 ar;
	u32 tar;
	u32 barl;
	u32 barh;
};

/*
 * With the convention of u-boot, the PCIE outbound window 0 serves
 * as configuration transactions outbound.
 */
#define PEX_OUTWIN0_BAR		0xCA4
#define PEX_OUTWIN0_TAL		0xCA8
#define PEX_OUTWIN0_TAH		0xCAC
#define PEX_RC_INWIN_BASE	0xE60
#define PEX_RCIWARn_EN		0x1

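/*
 * Illustration of the config-access scheme used below: accesses to the root
 * bus go through cfg_type0 directly, while accesses to downstream buses reuse
 * outbound window 0 as a sliding window.  mpc83xx_pcie_remap_cfg() writes
 * (bus << 24 | devfn << 16) into PEX_OUTWIN0_TAL, e.g. 0x01000000 for bus 1,
 * devfn 0, so that the fixed cfg_type1 mapping then reaches that device's
 * configuration space at the requested register offset.
 */
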
static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/*
	 * Workaround for the HW bug: for Type 0 configure transactions the
	 * PCI-E controller does not check the device number bits and just
	 * assumes that the device number bits are 0.
	 */
	if (bus->number == hose->first_busno ||
			bus->primary == hose->first_busno) {
		if (devfn & 0xf8)
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (ppc_md.pci_exclude_device) {
		if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	return PCIBIOS_SUCCESSFUL;
}

static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
					    unsigned int devfn, int offset)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct mpc83xx_pcie_priv *pcie = hose->dn->data;
	u32 dev_base = bus->number << 24 | devfn << 16;
	int ret;

	ret = mpc83xx_pcie_exclude_device(bus, devfn);
	if (ret)
		return NULL;

	offset &= 0xfff;

	/* Type 0 */
	if (bus->number == hose->first_busno)
		return pcie->cfg_type0 + offset;

	if (pcie->dev_base == dev_base)
		goto mapped;

	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);

	pcie->dev_base = dev_base;
mapped:
	return pcie->cfg_type1 + offset;
}

static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
	if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
		val &= 0xffffff00;

	return pci_generic_config_write(bus, devfn, offset, len, val);
}

static struct pci_ops mpc83xx_pcie_ops = {
	.map_bus = mpc83xx_pcie_remap_cfg,
	.read = pci_generic_config_read,
	.write = mpc83xx_pcie_write_config,
};

static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
				     struct resource *reg)
{
	struct mpc83xx_pcie_priv *pcie;
	u32 cfg_bar;
	int ret = -ENOMEM;

	pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return ret;

	pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
	if (!pcie->cfg_type0)
		goto err0;

	cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
	if (!cfg_bar) {
		/* PCI-E isn't configured. */
		ret = -ENODEV;
		goto err1;
	}

	pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
	if (!pcie->cfg_type1)
		goto err1;

	WARN_ON(hose->dn->data);
	hose->dn->data = pcie;
	hose->ops = &mpc83xx_pcie_ops;
	hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;

	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
	out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);

	if (fsl_pcie_check_link(hose))
		hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;

	return 0;
err1:
	iounmap(pcie->cfg_type0);
err0:
	kfree(pcie);
	return ret;
}

int __init mpc83xx_add_bridge(struct device_node *dev)
{
	int ret;
	int len;
	struct pci_controller *hose;
	struct resource rsrc_reg;
	struct resource rsrc_cfg;
	const int *bus_range;
	int primary;

	is_mpc83xx_pci = 1;

	if (!of_device_is_available(dev)) {
		pr_warning("%s: disabled by the firmware.\n",
			   dev->full_name);
		return -ENODEV;
	}
	pr_debug("Adding PCI host bridge %s\n", dev->full_name);

	/* Fetch host bridge registers address */
	if (of_address_to_resource(dev, 0, &rsrc_reg)) {
		printk(KERN_WARNING "Can't get pci register base!\n");
		return -ENOMEM;
	}

	memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));

	if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
		printk(KERN_WARNING
			"No pci config register base in dev tree, "
			"using default\n");
		/*
		 * MPC83xx supports up to two host controllers
		 *	one at 0x8500 has config space registers at 0x8300
		 *	one at 0x8600 has config space registers at 0x8380
		 */
		if ((rsrc_reg.start & 0xfffff) == 0x8500)
			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
		else if ((rsrc_reg.start & 0xfffff) == 0x8600)
			rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
	}
	/*
	 * Controller at offset 0x8500 is primary
	 */
	if ((rsrc_reg.start & 0xfffff) == 0x8500)
		primary = 1;
	else
		primary = 0;

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %s, assume"
		       " bus 0\n", dev->full_name);
	}

	pci_add_flags(PCI_REASSIGN_ALL_BUS);
	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;

	hose->first_busno = bus_range ? bus_range[0] : 0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
		ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
		if (ret)
			goto err0;
	} else {
		setup_indirect_pci(hose, rsrc_cfg.start,
				   rsrc_cfg.start + 4, 0);
	}

	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
		"Firmware bus number: %d->%d\n",
		(unsigned long long)rsrc_reg.start, hose->first_busno,
		hose->last_busno);

	pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
		 hose, hose->cfg_addr, hose->cfg_data);

	/* Interpret the "ranges" property */
	/* This also maps the I/O region and sets isa_io/mem_base */
	pci_process_bridge_OF_ranges(hose, dev, primary);

	return 0;
err0:
	pcibios_free_controller(hose);
	return ret;
}
#endif /* CONFIG_PPC_83xx || CONFIG_PPC_MPC512x */

u64 fsl_pci_immrbar_base(struct pci_controller *hose)
{
#ifdef CONFIG_PPC_83xx
	if (is_mpc83xx_pci) {
		struct mpc83xx_pcie_priv *pcie = hose->dn->data;
		struct pex_inbound_window *in;
		int i;

		/* Walk the Root Complex Inbound windows to match IMMR base */
		in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
		for (i = 0; i < 4; i++) {
			/* not enabled, skip */
			if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
				continue;

			if (get_immrbase() == in_le32(&in[i].tar))
				return (u64)in_le32(&in[i].barh) << 32 |
					    in_le32(&in[i].barl);
		}

		printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
	}
#endif

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
	if (!is_mpc83xx_pci) {
		u32 base;

		pci_bus_read_config_dword(hose->bus,
			PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);

		/*
		 * For PEXCSRBAR, bit 3-0 indicate prefetchable and
		 * address type. So when getting base address, these
		 * bits should be masked
		 */
		base &= PCI_BASE_ADDRESS_MEM_MASK;

		return base;
	}
#endif

	return 0;
}

#ifdef CONFIG_E500
/*
 * When a load from PCI MEM space gets no response (e.g. a master abort with
 * no link), e500 raises a machine check instead of returning data.  The
 * helpers below decode the faulting load, emulate the conventional all-ones
 * result for its access width and let execution continue past it.
 */
static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
{
	unsigned int rd, ra, rb, d;

	rd = get_rt(inst);
	ra = get_ra(inst);
	rb = get_rb(inst);
	d = get_d(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
		case OP_31_XOP_LWBRX:
			regs->gpr[rd] = 0xffffffff;
			break;

		case OP_31_XOP_LWZUX:
			regs->gpr[rd] = 0xffffffff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LBZX:
			regs->gpr[rd] = 0xff;
			break;

		case OP_31_XOP_LBZUX:
			regs->gpr[rd] = 0xff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LHZX:
		case OP_31_XOP_LHBRX:
			regs->gpr[rd] = 0xffff;
			break;

		case OP_31_XOP_LHZUX:
			regs->gpr[rd] = 0xffff;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		case OP_31_XOP_LHAX:
			regs->gpr[rd] = ~0UL;
			break;

		case OP_31_XOP_LHAUX:
			regs->gpr[rd] = ~0UL;
			regs->gpr[ra] += regs->gpr[rb];
			break;

		default:
			return 0;
		}
		break;

	case OP_LWZ:
		regs->gpr[rd] = 0xffffffff;
		break;

	case OP_LWZU:
		regs->gpr[rd] = 0xffffffff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LBZ:
		regs->gpr[rd] = 0xff;
		break;

	case OP_LBZU:
		regs->gpr[rd] = 0xff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LHZ:
		regs->gpr[rd] = 0xffff;
		break;

	case OP_LHZU:
		regs->gpr[rd] = 0xffff;
		regs->gpr[ra] += (s16)d;
		break;

	case OP_LHA:
		regs->gpr[rd] = ~0UL;
		break;

	case OP_LHAU:
		regs->gpr[rd] = ~0UL;
		regs->gpr[ra] += (s16)d;
		break;

	default:
		return 0;
	}

	return 1;
}

static int is_in_pci_mem_space(phys_addr_t addr)
{
	struct pci_controller *hose;
	struct resource *res;
	int i;

	list_for_each_entry(hose, &hose_list, list_node) {
		if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
			continue;

		for (i = 0; i < 3; i++) {
			res = &hose->mem_resources[i];
			if ((res->flags & IORESOURCE_MEM) &&
			    addr >= res->start && addr <= res->end)
				return 1;
		}
	}
	return 0;
}

int fsl_pci_mcheck_exception(struct pt_regs *regs)
{
	u32 inst;
	int ret;
	phys_addr_t addr = 0;

	/* Let KVM/QEMU deal with the exception */
	if (regs->msr & MSR_GS)
		return 0;

#ifdef CONFIG_PHYS_64BIT
	addr = mfspr(SPRN_MCARU);
	addr <<= 32;
#endif
	addr += mfspr(SPRN_MCAR);

	if (is_in_pci_mem_space(addr)) {
		if (user_mode(regs)) {
			pagefault_disable();
			/* fetch the faulting instruction from user space */
			ret = get_user(inst, (__u32 __user *)regs->nip);
			pagefault_enable();
		} else {
			ret = probe_kernel_address((void *)regs->nip, inst);
		}

		if (!ret && mcheck_handle_load(regs, inst)) {
			regs->nip += 4;
			return 1;
		}
	}

	return 0;
}
#endif

#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static const struct of_device_id pci_ids[] = {
	{ .compatible = "fsl,mpc8540-pci", },
	{ .compatible = "fsl,mpc8548-pcie", },
	{ .compatible = "fsl,mpc8610-pci", },
	{ .compatible = "fsl,mpc8641-pcie", },
	{ .compatible = "fsl,qoriq-pcie", },
	{ .compatible = "fsl,qoriq-pcie-v2.1", },
	{ .compatible = "fsl,qoriq-pcie-v2.2", },
	{ .compatible = "fsl,qoriq-pcie-v2.3", },
	{ .compatible = "fsl,qoriq-pcie-v2.4", },
	{ .compatible = "fsl,qoriq-pcie-v3.0", },

	/*
	 * The following entries are for compatibility with older device
	 * trees.
	 */
	{ .compatible = "fsl,p1022-pcie", },
	{ .compatible = "fsl,p4080-pcie", },

	{},
};

struct device_node *fsl_pci_primary;

void fsl_pci_assign_primary(void)
{
	struct device_node *np;

	/* Callers can specify the primary bus using other means. */
	if (fsl_pci_primary)
		return;

	/* If a PCI host bridge contains an ISA node, it's primary. */
	np = of_find_node_by_type(NULL, "isa");
	while ((fsl_pci_primary = of_get_parent(np))) {
		of_node_put(np);
		np = fsl_pci_primary;

		if (of_match_node(pci_ids, np) && of_device_is_available(np))
			return;
	}

	/*
	 * If there's no PCI host bridge with ISA, arbitrarily
	 * designate one as primary.  This can go away once
	 * various bugs with primary-less systems are fixed.
	 */
	for_each_matching_node(np, pci_ids) {
		if (of_device_is_available(np)) {
			fsl_pci_primary = np;
			of_node_put(np);
			return;
		}
	}
}

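/*
 * Sketch of the power-management handshake implemented below: on syscore
 * suspend each hose sends a PME_Turn_Off message by setting PEX_PMCR_PTOMR
 * and polls pex_pme_mes_dr for up to about 150 ms for the event to be
 * reported; on resume it requests an exit from the L2 link state via
 * PEX_PMCR_EXL2S, waits the same way and then reprograms the ATMU windows
 * via setup_pci_atmu().
 */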
#ifdef CONFIG_PM_SLEEP
static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
{
	struct pci_controller *hose = dev_id;
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;

	dr = in_be32(&pci->pex_pme_mes_dr);
	if (!dr)
		return IRQ_NONE;

	out_be32(&pci->pex_pme_mes_dr, dr);

	return IRQ_HANDLED;
}

static int fsl_pci_pme_probe(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci;
	struct pci_dev *dev;
	int pme_irq;
	int res;
	u16 pms;

	/* Get hose's pci_dev */
	dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);

	/* PME Disable */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
	pms &= ~PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);

	pme_irq = irq_of_parse_and_map(hose->dn, 0);
	if (!pme_irq) {
		dev_err(&dev->dev, "Failed to map PME interrupt.\n");

		return -ENXIO;
	}

	res = devm_request_irq(hose->parent, pme_irq,
			       fsl_pci_pme_handle,
			       IRQF_SHARED,
			       "[PCI] PME", hose);
	if (res < 0) {
		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
		irq_dispose_mapping(pme_irq);

		return -ENODEV;
	}

	pci = hose->private_data;

	/* Enable PTOD, ENL23D & EXL23D */
	clrbits32(&pci->pex_pme_mes_disr,
		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);

	out_be32(&pci->pex_pme_mes_ier, 0);
	setbits32(&pci->pex_pme_mes_ier,
		  PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);

	/* PME Enable */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
	pms |= PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);

	return 0;
}

static void send_pme_turnoff_message(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;
	int i;

	/* Send PME_Turn_Off Message Request */
	setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);

	/* Wait for the turn off to complete */
	for (i = 0; i < 150; i++) {
		dr = in_be32(&pci->pex_pme_mes_dr);
		if (dr) {
			out_be32(&pci->pex_pme_mes_dr, dr);
			break;
		}

		udelay(1000);
	}
}

static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
{
	send_pme_turnoff_message(hose);
}

static int fsl_pci_syscore_suspend(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		fsl_pci_syscore_do_suspend(hose);

	return 0;
}

static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
{
	struct ccsr_pci __iomem *pci = hose->private_data;
	u32 dr;
	int i;

	/* Send Exit L2 State Message */
	setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);

	/* Wait for the exit to complete */
	for (i = 0; i < 150; i++) {
		dr = in_be32(&pci->pex_pme_mes_dr);
		if (dr) {
			out_be32(&pci->pex_pme_mes_dr, dr);
			break;
		}

		udelay(1000);
	}

	setup_pci_atmu(hose);
}

static void fsl_pci_syscore_resume(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		fsl_pci_syscore_do_resume(hose);
}

static struct syscore_ops pci_syscore_pm_ops = {
	.suspend = fsl_pci_syscore_suspend,
	.resume = fsl_pci_syscore_resume,
};
#endif

void fsl_pcibios_fixup_phb(struct pci_controller *phb)
{
#ifdef CONFIG_PM_SLEEP
	fsl_pci_pme_probe(phb);
#endif
}

static int add_err_dev(struct platform_device *pdev)
{
	struct platform_device *errdev;
	struct mpc85xx_edac_pci_plat_data pd = {
		.of_node = pdev->dev.of_node
	};

	errdev = platform_device_register_resndata(&pdev->dev,
						   "mpc85xx-pci-edac",
						   PLATFORM_DEVID_AUTO,
						   pdev->resource,
						   pdev->num_resources,
						   &pd, sizeof(pd));
	if (IS_ERR(errdev))
		return PTR_ERR(errdev);

	return 0;
}

static int fsl_pci_probe(struct platform_device *pdev)
{
	struct device_node *node;
	int ret;

	node = pdev->dev.of_node;
	ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
	if (ret)
		return ret;

	ret = add_err_dev(pdev);
	if (ret)
		dev_err(&pdev->dev, "couldn't register error device: %d\n",
			ret);

	return 0;
}

static struct platform_driver fsl_pci_driver = {
	.driver = {
		.name = "fsl-pci",
		.of_match_table = pci_ids,
	},
	.probe = fsl_pci_probe,
};

static int __init fsl_pci_init(void)
{
#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&pci_syscore_pm_ops);
#endif
	return platform_driver_register(&fsl_pci_driver);
}
arch_initcall(fsl_pci_init);
#endif