/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/sched/mm.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

static DEFINE_MUTEX(p2p_mutex);
static DEFINE_MUTEX(tunnel_mutex);

int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);

int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
	if (rc < OPAL_SUCCESS)
		return -EIO;

	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);

int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);

int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);

int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);

#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}

void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
#endif /* CONFIG_PCI_MSI */

/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
	__be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
	bool dup = false;
	int i;

	for (i = 0; i < pest_size; i++) {
		__be64 peA = be64_to_cpu(pestA[i]);
		__be64 peB = be64_to_cpu(pestB[i]);

		if (peA != prevA || peB != prevB) {
			if (dup) {
				pr_info("PE[..%03x] A/B: as above\n", i-1);
				dup = false;
			}
			prevA = peA;
			prevB = peB;
			if (peA & PNV_IODA_STOPPED_STATE ||
			    peB & PNV_IODA_STOPPED_STATE)
				pr_info("PE[%03x] A/B: %016llx %016llx\n",
					i, peA, peB);
		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
				    peB & PNV_IODA_STOPPED_STATE)) {
			dup = true;
		}
	}
}

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}

static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData *)common;
	pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp: %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));

	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}

void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
		pnv_pci_dump_phb4_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let it handle the unfreeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}

static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have
	 * set that up yet, so map all ER errors to the reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

623 */ 624 if (phb->get_pe_state) { 625 fstate = phb->get_pe_state(phb, pe_no); 626 } else { 627 rc = opal_pci_eeh_freeze_status(phb->opal_id, 628 pe_no, 629 &fstate, 630 &pcierr, 631 NULL); 632 if (rc) { 633 pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n", 634 __func__, rc, phb->hose->global_number, pe_no); 635 return; 636 } 637 } 638 639 pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n", 640 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate); 641 642 /* Clear the frozen state if applicable */ 643 if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE || 644 fstate == OPAL_EEH_STOPPED_DMA_FREEZE || 645 fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) { 646 /* 647 * If PHB supports compound PE, freeze it for 648 * consistency. 649 */ 650 if (phb->freeze_pe) 651 phb->freeze_pe(phb, pe_no); 652 653 pnv_pci_handle_eeh_config(phb, pe_no); 654 } 655 } 656 657 int pnv_pci_cfg_read(struct pci_dn *pdn, 658 int where, int size, u32 *val) 659 { 660 struct pnv_phb *phb = pdn->phb->private_data; 661 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 662 s64 rc; 663 664 switch (size) { 665 case 1: { 666 u8 v8; 667 rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8); 668 *val = (rc == OPAL_SUCCESS) ? v8 : 0xff; 669 break; 670 } 671 case 2: { 672 __be16 v16; 673 rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where, 674 &v16); 675 *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff; 676 break; 677 } 678 case 4: { 679 __be32 v32; 680 rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32); 681 *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff; 682 break; 683 } 684 default: 685 return PCIBIOS_FUNC_NOT_SUPPORTED; 686 } 687 688 pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", 689 __func__, pdn->busno, pdn->devfn, where, size, *val); 690 return PCIBIOS_SUCCESSFUL; 691 } 692 693 int pnv_pci_cfg_write(struct pci_dn *pdn, 694 int where, int size, u32 val) 695 { 696 struct pnv_phb *phb = pdn->phb->private_data; 697 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 698 699 pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", 700 __func__, pdn->busno, pdn->devfn, where, size, val); 701 switch (size) { 702 case 1: 703 opal_pci_config_write_byte(phb->opal_id, bdfn, where, val); 704 break; 705 case 2: 706 opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val); 707 break; 708 case 4: 709 opal_pci_config_write_word(phb->opal_id, bdfn, where, val); 710 break; 711 default: 712 return PCIBIOS_FUNC_NOT_SUPPORTED; 713 } 714 715 return PCIBIOS_SUCCESSFUL; 716 } 717 718 #if CONFIG_EEH 719 static bool pnv_pci_cfg_check(struct pci_dn *pdn) 720 { 721 struct eeh_dev *edev = NULL; 722 struct pnv_phb *phb = pdn->phb->private_data; 723 724 /* EEH not enabled ? */ 725 if (!(phb->flags & PNV_PHB_FLAG_EEH)) 726 return true; 727 728 /* PE reset or device removed ? 
	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

/*
 * Walk the (possibly multi-level) TCE table and return a pointer to
 * the leaf entry for @idx.
 */
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}

int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		  unsigned long uaddr, enum dma_data_direction direction,
		  unsigned long attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
}

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}

long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			   node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}

void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}

void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}

void pnv_pci_dma_bus_setup(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	list_for_each_entry(pe, &phb->ioda.pe_list, list) {
		if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
			continue;

		if (!pe->pbus)
			continue;

		if (bus->number == ((pe->rid >> 8) & 0xFF)) {
			pe->pbus = bus;
			break;
		}
	}
}

int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc)
{
	struct pci_controller *hose;
	struct pnv_phb *phb_init, *phb_target;
	struct pnv_ioda_pe *pe_init;
	int rc;

	if (!opal_check_token(OPAL_PCI_SET_P2P))
		return -ENXIO;

	hose = pci_bus_to_host(initiator->bus);
	phb_init = hose->private_data;

	hose = pci_bus_to_host(target->bus);
	phb_target = hose->private_data;

	pe_init = pnv_ioda_get_pe(initiator);
	if (!pe_init)
		return -ENODEV;

	/*
	 * Configuring the initiator's PHB requires adjusting its TVE#1
	 * setting. Since the same device can be an initiator several
	 * times for different target devices, we need to keep a reference
	 * count to know when we can restore the default bypass setting
	 * on its TVE#1 when disabling. OPAL does not track PE states, so
	 * we add a reference count on the PE in Linux.
	 *
	 * For the target, the configuration is per PHB, so we keep a
	 * target reference count on the PHB.
	 */
	mutex_lock(&p2p_mutex);

	if (desc & OPAL_PCI_P2P_ENABLE) {
		/* always go to opal to validate the configuration */
		rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id,
				      desc, pe_init->pe_number);

		if (rc != OPAL_SUCCESS) {
			rc = -EIO;
			goto out;
		}

		pe_init->p2p_initiator_count++;
		phb_target->p2p_target_count++;
	} else {
		if (!pe_init->p2p_initiator_count ||
		    !phb_target->p2p_target_count) {
			rc = -EINVAL;
			goto out;
		}

		if (--pe_init->p2p_initiator_count == 0)
			pnv_pci_ioda2_set_bypass(pe_init, true);

		if (--phb_target->p2p_target_count == 0) {
			rc = opal_pci_set_p2p(phb_init->opal_id,
					      phb_target->opal_id, desc,
					      pe_init->pe_number);
			if (rc != OPAL_SUCCESS) {
				rc = -EIO;
				goto out;
			}
		}
	}
	rc = 0;
out:
	mutex_unlock(&p2p_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_p2p);

struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	return of_node_get(hose->dn);
}
EXPORT_SYMBOL(pnv_pci_get_phb_node);

int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind)
{
	struct device_node *np;
	const __be32 *prop;
	struct pnv_ioda_pe *pe;
	uint16_t window_id;
	int rc;

	if (!radix_enabled())
		return -ENXIO;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENXIO;

	prop = of_get_property(np, "ibm,phb-indications", NULL);
	of_node_put(np);

	if (!prop || !prop[1])
		return -ENXIO;

	*asnind = (u64)be32_to_cpu(prop[1]);
	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Increase real window size to accept as_notify messages. */
	window_id = (pe->pe_number << 1) + 1;
	rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number,
					     window_id, pe->tce_bypass_base,
					     (uint64_t)1 << 48);
	return opal_error_code(rc);
}
EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel);

int pnv_pci_disable_tunnel(struct pci_dev *dev)
{
	struct pnv_ioda_pe *pe;

	pe = pnv_ioda_get_pe(dev);
	if (!pe)
		return -ENODEV;

	/* Restore default real window size. */
	pnv_pci_ioda2_set_bypass(pe, true);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel);

int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	__be64 val;
	struct pci_controller *hose;
	struct pnv_phb *phb;
	u64 tunnel_bar;
	int rc;

	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
		return -ENXIO;
	if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	hose = pci_bus_to_host(dev->bus);
	phb = hose->private_data;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);
	if (enable) {
		/*
		 * Only one device per PHB can use atomics.
		 * Our policy is first-come, first-served.
		 */
		if (tunnel_bar) {
			if (tunnel_bar != addr)
				rc = -EBUSY;
			else
				rc = 0;	/* Setting same address twice is ok */
			goto out;
		}
	} else {
		/*
		 * The device that owns atomics and wants to release
		 * them must pass the same address with enable == 0.
		 */
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}
	rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr);
	rc = opal_error_code(rc);
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);

#ifdef CONFIG_PPC64	/* for thread.tidr */
int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid,
			       u32 *tid)
{
	struct mm_struct *mm = NULL;

	if (task == NULL)
		return -EINVAL;

	mm = get_task_mm(task);
	if (mm == NULL)
		return -EINVAL;

	*pid = mm->context.id;
	mmput(mm);

	*tid = task->thread.tidr;
	*lpid = mfspr(SPRN_LPID);
	return 0;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info);
#endif

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in sim, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);