1 /* 2 * Support PCI/PCIe on PowerNV platforms 3 * 4 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 */ 11 12 #include <linux/kernel.h> 13 #include <linux/pci.h> 14 #include <linux/delay.h> 15 #include <linux/string.h> 16 #include <linux/init.h> 17 #include <linux/irq.h> 18 #include <linux/io.h> 19 #include <linux/msi.h> 20 #include <linux/iommu.h> 21 #include <linux/sched/mm.h> 22 23 #include <asm/sections.h> 24 #include <asm/io.h> 25 #include <asm/prom.h> 26 #include <asm/pci-bridge.h> 27 #include <asm/machdep.h> 28 #include <asm/msi_bitmap.h> 29 #include <asm/ppc-pci.h> 30 #include <asm/pnv-pci.h> 31 #include <asm/opal.h> 32 #include <asm/iommu.h> 33 #include <asm/tce.h> 34 #include <asm/firmware.h> 35 #include <asm/eeh_event.h> 36 #include <asm/eeh.h> 37 38 #include "powernv.h" 39 #include "pci.h" 40 41 static DEFINE_MUTEX(p2p_mutex); 42 static DEFINE_MUTEX(tunnel_mutex); 43 44 int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) 45 { 46 struct device_node *parent = np; 47 u32 bdfn; 48 u64 phbid; 49 int ret; 50 51 ret = of_property_read_u32(np, "reg", &bdfn); 52 if (ret) 53 return -ENXIO; 54 55 bdfn = ((bdfn & 0x00ffff00) >> 8); 56 while ((parent = of_get_parent(parent))) { 57 if (!PCI_DN(parent)) { 58 of_node_put(parent); 59 break; 60 } 61 62 if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) { 63 of_node_put(parent); 64 continue; 65 } 66 67 ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid); 68 if (ret) { 69 of_node_put(parent); 70 return -ENXIO; 71 } 72 73 *id = PCI_SLOT_ID(phbid, bdfn); 74 return 0; 75 } 76 77 return -ENODEV; 78 } 79 EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id); 80 81 int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len) 82 { 83 int64_t rc; 84 85 if (!opal_check_token(OPAL_GET_DEVICE_TREE)) 86 return -ENXIO; 87 88 rc = opal_get_device_tree(phandle, (uint64_t)buf, len); 89 if (rc < OPAL_SUCCESS) 90 return -EIO; 91 92 return rc; 93 } 94 EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree); 95 96 int pnv_pci_get_presence_state(uint64_t id, uint8_t *state) 97 { 98 int64_t rc; 99 100 if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE)) 101 return -ENXIO; 102 103 rc = opal_pci_get_presence_state(id, (uint64_t)state); 104 if (rc != OPAL_SUCCESS) 105 return -EIO; 106 107 return 0; 108 } 109 EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state); 110 111 int pnv_pci_get_power_state(uint64_t id, uint8_t *state) 112 { 113 int64_t rc; 114 115 if (!opal_check_token(OPAL_PCI_GET_POWER_STATE)) 116 return -ENXIO; 117 118 rc = opal_pci_get_power_state(id, (uint64_t)state); 119 if (rc != OPAL_SUCCESS) 120 return -EIO; 121 122 return 0; 123 } 124 EXPORT_SYMBOL_GPL(pnv_pci_get_power_state); 125 126 int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg) 127 { 128 struct opal_msg m; 129 int token, ret; 130 int64_t rc; 131 132 if (!opal_check_token(OPAL_PCI_SET_POWER_STATE)) 133 return -ENXIO; 134 135 token = opal_async_get_token_interruptible(); 136 if (unlikely(token < 0)) 137 return token; 138 139 rc = opal_pci_set_power_state(token, id, (uint64_t)&state); 140 if (rc == OPAL_SUCCESS) { 141 ret = 0; 142 goto exit; 143 } else if (rc != OPAL_ASYNC_COMPLETION) { 144 ret = -EIO; 145 goto exit; 146 } 147 148 ret = opal_async_wait_response(token, &m); 149 if (ret < 0) 150 goto exit; 151 152 if (msg) { 153 ret = 1; 154 memcpy(msg, &m, sizeof(m)); 155 } 156 157 exit: 158 opal_async_release_token(token); 159 return ret; 160 } 161 EXPORT_SYMBOL_GPL(pnv_pci_set_power_state); 162 163 #ifdef CONFIG_PCI_MSI 164 int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 165 { 166 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 167 struct pnv_phb *phb = hose->private_data; 168 struct msi_desc *entry; 169 struct msi_msg msg; 170 int hwirq; 171 unsigned int virq; 172 int rc; 173 174 if (WARN_ON(!phb) || !phb->msi_bmp.bitmap) 175 return -ENODEV; 176 177 if (pdev->no_64bit_msi && !phb->msi32_support) 178 return -ENODEV; 179 180 for_each_pci_msi_entry(entry, pdev) { 181 if (!entry->msi_attrib.is_64 && !phb->msi32_support) { 182 pr_warn("%s: Supports only 64-bit MSIs\n", 183 pci_name(pdev)); 184 return -ENXIO; 185 } 186 hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1); 187 if (hwirq < 0) { 188 pr_warn("%s: Failed to find a free MSI\n", 189 pci_name(pdev)); 190 return -ENOSPC; 191 } 192 virq = irq_create_mapping(NULL, phb->msi_base + hwirq); 193 if (!virq) { 194 pr_warn("%s: Failed to map MSI to linux irq\n", 195 pci_name(pdev)); 196 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1); 197 return -ENOMEM; 198 } 199 rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq, 200 virq, entry->msi_attrib.is_64, &msg); 201 if (rc) { 202 pr_warn("%s: Failed to setup MSI\n", pci_name(pdev)); 203 irq_dispose_mapping(virq); 204 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1); 205 return rc; 206 } 207 irq_set_msi_desc(virq, entry); 208 pci_write_msi_msg(virq, &msg); 209 } 210 return 0; 211 } 212 213 void pnv_teardown_msi_irqs(struct pci_dev *pdev) 214 { 215 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 216 struct pnv_phb *phb = hose->private_data; 217 struct msi_desc *entry; 218 irq_hw_number_t hwirq; 219 220 if (WARN_ON(!phb)) 221 return; 222 223 for_each_pci_msi_entry(entry, pdev) { 224 if (!entry->irq) 225 continue; 226 hwirq = virq_to_hw(entry->irq); 227 irq_set_msi_desc(entry->irq, NULL); 228 irq_dispose_mapping(entry->irq); 229 msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1); 230 } 231 } 232 #endif /* CONFIG_PCI_MSI */ 233 234 /* Nicely print the contents of the PE State Tables (PEST). */ 235 static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size) 236 { 237 __be64 prevA = ULONG_MAX, prevB = ULONG_MAX; 238 bool dup = false; 239 int i; 240 241 for (i = 0; i < pest_size; i++) { 242 __be64 peA = be64_to_cpu(pestA[i]); 243 __be64 peB = be64_to_cpu(pestB[i]); 244 245 if (peA != prevA || peB != prevB) { 246 if (dup) { 247 pr_info("PE[..%03x] A/B: as above\n", i-1); 248 dup = false; 249 } 250 prevA = peA; 251 prevB = peB; 252 if (peA & PNV_IODA_STOPPED_STATE || 253 peB & PNV_IODA_STOPPED_STATE) 254 pr_info("PE[%03x] A/B: %016llx %016llx\n", 255 i, peA, peB); 256 } else if (!dup && (peA & PNV_IODA_STOPPED_STATE || 257 peB & PNV_IODA_STOPPED_STATE)) { 258 dup = true; 259 } 260 } 261 } 262 263 static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, 264 struct OpalIoPhbErrorCommon *common) 265 { 266 struct OpalIoP7IOCPhbErrorData *data; 267 268 data = (struct OpalIoP7IOCPhbErrorData *)common; 269 pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n", 270 hose->global_number, be32_to_cpu(common->version)); 271 272 if (data->brdgCtl) 273 pr_info("brdgCtl: %08x\n", 274 be32_to_cpu(data->brdgCtl)); 275 if (data->portStatusReg || data->rootCmplxStatus || 276 data->busAgentStatus) 277 pr_info("UtlSts: %08x %08x %08x\n", 278 be32_to_cpu(data->portStatusReg), 279 be32_to_cpu(data->rootCmplxStatus), 280 be32_to_cpu(data->busAgentStatus)); 281 if (data->deviceStatus || data->slotStatus || 282 data->linkStatus || data->devCmdStatus || 283 data->devSecStatus) 284 pr_info("RootSts: %08x %08x %08x %08x %08x\n", 285 be32_to_cpu(data->deviceStatus), 286 be32_to_cpu(data->slotStatus), 287 be32_to_cpu(data->linkStatus), 288 be32_to_cpu(data->devCmdStatus), 289 be32_to_cpu(data->devSecStatus)); 290 if (data->rootErrorStatus || data->uncorrErrorStatus || 291 data->corrErrorStatus) 292 pr_info("RootErrSts: %08x %08x %08x\n", 293 be32_to_cpu(data->rootErrorStatus), 294 be32_to_cpu(data->uncorrErrorStatus), 295 be32_to_cpu(data->corrErrorStatus)); 296 if (data->tlpHdr1 || data->tlpHdr2 || 297 data->tlpHdr3 || data->tlpHdr4) 298 pr_info("RootErrLog: %08x %08x %08x %08x\n", 299 be32_to_cpu(data->tlpHdr1), 300 be32_to_cpu(data->tlpHdr2), 301 be32_to_cpu(data->tlpHdr3), 302 be32_to_cpu(data->tlpHdr4)); 303 if (data->sourceId || data->errorClass || 304 data->correlator) 305 pr_info("RootErrLog1: %08x %016llx %016llx\n", 306 be32_to_cpu(data->sourceId), 307 be64_to_cpu(data->errorClass), 308 be64_to_cpu(data->correlator)); 309 if (data->p7iocPlssr || data->p7iocCsr) 310 pr_info("PhbSts: %016llx %016llx\n", 311 be64_to_cpu(data->p7iocPlssr), 312 be64_to_cpu(data->p7iocCsr)); 313 if (data->lemFir) 314 pr_info("Lem: %016llx %016llx %016llx\n", 315 be64_to_cpu(data->lemFir), 316 be64_to_cpu(data->lemErrorMask), 317 be64_to_cpu(data->lemWOF)); 318 if (data->phbErrorStatus) 319 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", 320 be64_to_cpu(data->phbErrorStatus), 321 be64_to_cpu(data->phbFirstErrorStatus), 322 be64_to_cpu(data->phbErrorLog0), 323 be64_to_cpu(data->phbErrorLog1)); 324 if (data->mmioErrorStatus) 325 pr_info("OutErr: %016llx %016llx %016llx %016llx\n", 326 be64_to_cpu(data->mmioErrorStatus), 327 be64_to_cpu(data->mmioFirstErrorStatus), 328 be64_to_cpu(data->mmioErrorLog0), 329 be64_to_cpu(data->mmioErrorLog1)); 330 if (data->dma0ErrorStatus) 331 pr_info("InAErr: %016llx %016llx %016llx %016llx\n", 332 be64_to_cpu(data->dma0ErrorStatus), 333 be64_to_cpu(data->dma0FirstErrorStatus), 334 be64_to_cpu(data->dma0ErrorLog0), 335 be64_to_cpu(data->dma0ErrorLog1)); 336 if (data->dma1ErrorStatus) 337 pr_info("InBErr: %016llx %016llx %016llx %016llx\n", 338 be64_to_cpu(data->dma1ErrorStatus), 339 be64_to_cpu(data->dma1FirstErrorStatus), 340 be64_to_cpu(data->dma1ErrorLog0), 341 be64_to_cpu(data->dma1ErrorLog1)); 342 343 pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS); 344 } 345 346 static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, 347 struct OpalIoPhbErrorCommon *common) 348 { 349 struct OpalIoPhb3ErrorData *data; 350 351 data = (struct OpalIoPhb3ErrorData*)common; 352 pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n", 353 hose->global_number, be32_to_cpu(common->version)); 354 if (data->brdgCtl) 355 pr_info("brdgCtl: %08x\n", 356 be32_to_cpu(data->brdgCtl)); 357 if (data->portStatusReg || data->rootCmplxStatus || 358 data->busAgentStatus) 359 pr_info("UtlSts: %08x %08x %08x\n", 360 be32_to_cpu(data->portStatusReg), 361 be32_to_cpu(data->rootCmplxStatus), 362 be32_to_cpu(data->busAgentStatus)); 363 if (data->deviceStatus || data->slotStatus || 364 data->linkStatus || data->devCmdStatus || 365 data->devSecStatus) 366 pr_info("RootSts: %08x %08x %08x %08x %08x\n", 367 be32_to_cpu(data->deviceStatus), 368 be32_to_cpu(data->slotStatus), 369 be32_to_cpu(data->linkStatus), 370 be32_to_cpu(data->devCmdStatus), 371 be32_to_cpu(data->devSecStatus)); 372 if (data->rootErrorStatus || data->uncorrErrorStatus || 373 data->corrErrorStatus) 374 pr_info("RootErrSts: %08x %08x %08x\n", 375 be32_to_cpu(data->rootErrorStatus), 376 be32_to_cpu(data->uncorrErrorStatus), 377 be32_to_cpu(data->corrErrorStatus)); 378 if (data->tlpHdr1 || data->tlpHdr2 || 379 data->tlpHdr3 || data->tlpHdr4) 380 pr_info("RootErrLog: %08x %08x %08x %08x\n", 381 be32_to_cpu(data->tlpHdr1), 382 be32_to_cpu(data->tlpHdr2), 383 be32_to_cpu(data->tlpHdr3), 384 be32_to_cpu(data->tlpHdr4)); 385 if (data->sourceId || data->errorClass || 386 data->correlator) 387 pr_info("RootErrLog1: %08x %016llx %016llx\n", 388 be32_to_cpu(data->sourceId), 389 be64_to_cpu(data->errorClass), 390 be64_to_cpu(data->correlator)); 391 if (data->nFir) 392 pr_info("nFir: %016llx %016llx %016llx\n", 393 be64_to_cpu(data->nFir), 394 be64_to_cpu(data->nFirMask), 395 be64_to_cpu(data->nFirWOF)); 396 if (data->phbPlssr || data->phbCsr) 397 pr_info("PhbSts: %016llx %016llx\n", 398 be64_to_cpu(data->phbPlssr), 399 be64_to_cpu(data->phbCsr)); 400 if (data->lemFir) 401 pr_info("Lem: %016llx %016llx %016llx\n", 402 be64_to_cpu(data->lemFir), 403 be64_to_cpu(data->lemErrorMask), 404 be64_to_cpu(data->lemWOF)); 405 if (data->phbErrorStatus) 406 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", 407 be64_to_cpu(data->phbErrorStatus), 408 be64_to_cpu(data->phbFirstErrorStatus), 409 be64_to_cpu(data->phbErrorLog0), 410 be64_to_cpu(data->phbErrorLog1)); 411 if (data->mmioErrorStatus) 412 pr_info("OutErr: %016llx %016llx %016llx %016llx\n", 413 be64_to_cpu(data->mmioErrorStatus), 414 be64_to_cpu(data->mmioFirstErrorStatus), 415 be64_to_cpu(data->mmioErrorLog0), 416 be64_to_cpu(data->mmioErrorLog1)); 417 if (data->dma0ErrorStatus) 418 pr_info("InAErr: %016llx %016llx %016llx %016llx\n", 419 be64_to_cpu(data->dma0ErrorStatus), 420 be64_to_cpu(data->dma0FirstErrorStatus), 421 be64_to_cpu(data->dma0ErrorLog0), 422 be64_to_cpu(data->dma0ErrorLog1)); 423 if (data->dma1ErrorStatus) 424 pr_info("InBErr: %016llx %016llx %016llx %016llx\n", 425 be64_to_cpu(data->dma1ErrorStatus), 426 be64_to_cpu(data->dma1FirstErrorStatus), 427 be64_to_cpu(data->dma1ErrorLog0), 428 be64_to_cpu(data->dma1ErrorLog1)); 429 430 pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS); 431 } 432 433 static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose, 434 struct OpalIoPhbErrorCommon *common) 435 { 436 struct OpalIoPhb4ErrorData *data; 437 438 data = (struct OpalIoPhb4ErrorData*)common; 439 pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n", 440 hose->global_number, be32_to_cpu(common->version)); 441 if (data->brdgCtl) 442 pr_info("brdgCtl: %08x\n", 443 be32_to_cpu(data->brdgCtl)); 444 if (data->deviceStatus || data->slotStatus || 445 data->linkStatus || data->devCmdStatus || 446 data->devSecStatus) 447 pr_info("RootSts: %08x %08x %08x %08x %08x\n", 448 be32_to_cpu(data->deviceStatus), 449 be32_to_cpu(data->slotStatus), 450 be32_to_cpu(data->linkStatus), 451 be32_to_cpu(data->devCmdStatus), 452 be32_to_cpu(data->devSecStatus)); 453 if (data->rootErrorStatus || data->uncorrErrorStatus || 454 data->corrErrorStatus) 455 pr_info("RootErrSts: %08x %08x %08x\n", 456 be32_to_cpu(data->rootErrorStatus), 457 be32_to_cpu(data->uncorrErrorStatus), 458 be32_to_cpu(data->corrErrorStatus)); 459 if (data->tlpHdr1 || data->tlpHdr2 || 460 data->tlpHdr3 || data->tlpHdr4) 461 pr_info("RootErrLog: %08x %08x %08x %08x\n", 462 be32_to_cpu(data->tlpHdr1), 463 be32_to_cpu(data->tlpHdr2), 464 be32_to_cpu(data->tlpHdr3), 465 be32_to_cpu(data->tlpHdr4)); 466 if (data->sourceId) 467 pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId)); 468 if (data->nFir) 469 pr_info("nFir: %016llx %016llx %016llx\n", 470 be64_to_cpu(data->nFir), 471 be64_to_cpu(data->nFirMask), 472 be64_to_cpu(data->nFirWOF)); 473 if (data->phbPlssr || data->phbCsr) 474 pr_info("PhbSts: %016llx %016llx\n", 475 be64_to_cpu(data->phbPlssr), 476 be64_to_cpu(data->phbCsr)); 477 if (data->lemFir) 478 pr_info("Lem: %016llx %016llx %016llx\n", 479 be64_to_cpu(data->lemFir), 480 be64_to_cpu(data->lemErrorMask), 481 be64_to_cpu(data->lemWOF)); 482 if (data->phbErrorStatus) 483 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", 484 be64_to_cpu(data->phbErrorStatus), 485 be64_to_cpu(data->phbFirstErrorStatus), 486 be64_to_cpu(data->phbErrorLog0), 487 be64_to_cpu(data->phbErrorLog1)); 488 if (data->phbTxeErrorStatus) 489 pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n", 490 be64_to_cpu(data->phbTxeErrorStatus), 491 be64_to_cpu(data->phbTxeFirstErrorStatus), 492 be64_to_cpu(data->phbTxeErrorLog0), 493 be64_to_cpu(data->phbTxeErrorLog1)); 494 if (data->phbRxeArbErrorStatus) 495 pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n", 496 be64_to_cpu(data->phbRxeArbErrorStatus), 497 be64_to_cpu(data->phbRxeArbFirstErrorStatus), 498 be64_to_cpu(data->phbRxeArbErrorLog0), 499 be64_to_cpu(data->phbRxeArbErrorLog1)); 500 if (data->phbRxeMrgErrorStatus) 501 pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n", 502 be64_to_cpu(data->phbRxeMrgErrorStatus), 503 be64_to_cpu(data->phbRxeMrgFirstErrorStatus), 504 be64_to_cpu(data->phbRxeMrgErrorLog0), 505 be64_to_cpu(data->phbRxeMrgErrorLog1)); 506 if (data->phbRxeTceErrorStatus) 507 pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n", 508 be64_to_cpu(data->phbRxeTceErrorStatus), 509 be64_to_cpu(data->phbRxeTceFirstErrorStatus), 510 be64_to_cpu(data->phbRxeTceErrorLog0), 511 be64_to_cpu(data->phbRxeTceErrorLog1)); 512 513 if (data->phbPblErrorStatus) 514 pr_info("PblErr: %016llx %016llx %016llx %016llx\n", 515 be64_to_cpu(data->phbPblErrorStatus), 516 be64_to_cpu(data->phbPblFirstErrorStatus), 517 be64_to_cpu(data->phbPblErrorLog0), 518 be64_to_cpu(data->phbPblErrorLog1)); 519 if (data->phbPcieDlpErrorStatus) 520 pr_info("PcieDlp: %016llx %016llx %016llx\n", 521 be64_to_cpu(data->phbPcieDlpErrorLog1), 522 be64_to_cpu(data->phbPcieDlpErrorLog2), 523 be64_to_cpu(data->phbPcieDlpErrorStatus)); 524 if (data->phbRegbErrorStatus) 525 pr_info("RegbErr: %016llx %016llx %016llx %016llx\n", 526 be64_to_cpu(data->phbRegbErrorStatus), 527 be64_to_cpu(data->phbRegbFirstErrorStatus), 528 be64_to_cpu(data->phbRegbErrorLog0), 529 be64_to_cpu(data->phbRegbErrorLog1)); 530 531 532 pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS); 533 } 534 535 void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, 536 unsigned char *log_buff) 537 { 538 struct OpalIoPhbErrorCommon *common; 539 540 if (!hose || !log_buff) 541 return; 542 543 common = (struct OpalIoPhbErrorCommon *)log_buff; 544 switch (be32_to_cpu(common->ioType)) { 545 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: 546 pnv_pci_dump_p7ioc_diag_data(hose, common); 547 break; 548 case OPAL_PHB_ERROR_DATA_TYPE_PHB3: 549 pnv_pci_dump_phb3_diag_data(hose, common); 550 break; 551 case OPAL_PHB_ERROR_DATA_TYPE_PHB4: 552 pnv_pci_dump_phb4_diag_data(hose, common); 553 break; 554 default: 555 pr_warn("%s: Unrecognized ioType %d\n", 556 __func__, be32_to_cpu(common->ioType)); 557 } 558 } 559 560 static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no) 561 { 562 unsigned long flags, rc; 563 int has_diag, ret = 0; 564 565 spin_lock_irqsave(&phb->lock, flags); 566 567 /* Fetch PHB diag-data */ 568 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, 569 phb->diag_data_size); 570 has_diag = (rc == OPAL_SUCCESS); 571 572 /* If PHB supports compound PE, to handle it */ 573 if (phb->unfreeze_pe) { 574 ret = phb->unfreeze_pe(phb, 575 pe_no, 576 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 577 } else { 578 rc = opal_pci_eeh_freeze_clear(phb->opal_id, 579 pe_no, 580 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); 581 if (rc) { 582 pr_warn("%s: Failure %ld clearing frozen " 583 "PHB#%x-PE#%x\n", 584 __func__, rc, phb->hose->global_number, 585 pe_no); 586 ret = -EIO; 587 } 588 } 589 590 /* 591 * For now, let's only display the diag buffer when we fail to clear 592 * the EEH status. We'll do more sensible things later when we have 593 * proper EEH support. We need to make sure we don't pollute ourselves 594 * with the normal errors generated when probing empty slots 595 */ 596 if (has_diag && ret) 597 pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data); 598 599 spin_unlock_irqrestore(&phb->lock, flags); 600 } 601 602 static void pnv_pci_config_check_eeh(struct pci_dn *pdn) 603 { 604 struct pnv_phb *phb = pdn->phb->private_data; 605 u8 fstate; 606 __be16 pcierr; 607 unsigned int pe_no; 608 s64 rc; 609 610 /* 611 * Get the PE#. During the PCI probe stage, we might not 612 * setup that yet. So all ER errors should be mapped to 613 * reserved PE. 614 */ 615 pe_no = pdn->pe_number; 616 if (pe_no == IODA_INVALID_PE) { 617 pe_no = phb->ioda.reserved_pe_idx; 618 } 619 620 /* 621 * Fetch frozen state. If the PHB support compound PE, 622 * we need handle that case. 623 */ 624 if (phb->get_pe_state) { 625 fstate = phb->get_pe_state(phb, pe_no); 626 } else { 627 rc = opal_pci_eeh_freeze_status(phb->opal_id, 628 pe_no, 629 &fstate, 630 &pcierr, 631 NULL); 632 if (rc) { 633 pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n", 634 __func__, rc, phb->hose->global_number, pe_no); 635 return; 636 } 637 } 638 639 pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n", 640 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate); 641 642 /* Clear the frozen state if applicable */ 643 if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE || 644 fstate == OPAL_EEH_STOPPED_DMA_FREEZE || 645 fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) { 646 /* 647 * If PHB supports compound PE, freeze it for 648 * consistency. 649 */ 650 if (phb->freeze_pe) 651 phb->freeze_pe(phb, pe_no); 652 653 pnv_pci_handle_eeh_config(phb, pe_no); 654 } 655 } 656 657 int pnv_pci_cfg_read(struct pci_dn *pdn, 658 int where, int size, u32 *val) 659 { 660 struct pnv_phb *phb = pdn->phb->private_data; 661 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 662 s64 rc; 663 664 switch (size) { 665 case 1: { 666 u8 v8; 667 rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8); 668 *val = (rc == OPAL_SUCCESS) ? v8 : 0xff; 669 break; 670 } 671 case 2: { 672 __be16 v16; 673 rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where, 674 &v16); 675 *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff; 676 break; 677 } 678 case 4: { 679 __be32 v32; 680 rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32); 681 *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff; 682 break; 683 } 684 default: 685 return PCIBIOS_FUNC_NOT_SUPPORTED; 686 } 687 688 pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", 689 __func__, pdn->busno, pdn->devfn, where, size, *val); 690 return PCIBIOS_SUCCESSFUL; 691 } 692 693 int pnv_pci_cfg_write(struct pci_dn *pdn, 694 int where, int size, u32 val) 695 { 696 struct pnv_phb *phb = pdn->phb->private_data; 697 u32 bdfn = (pdn->busno << 8) | pdn->devfn; 698 699 pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", 700 __func__, pdn->busno, pdn->devfn, where, size, val); 701 switch (size) { 702 case 1: 703 opal_pci_config_write_byte(phb->opal_id, bdfn, where, val); 704 break; 705 case 2: 706 opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val); 707 break; 708 case 4: 709 opal_pci_config_write_word(phb->opal_id, bdfn, where, val); 710 break; 711 default: 712 return PCIBIOS_FUNC_NOT_SUPPORTED; 713 } 714 715 return PCIBIOS_SUCCESSFUL; 716 } 717 718 #if CONFIG_EEH 719 static bool pnv_pci_cfg_check(struct pci_dn *pdn) 720 { 721 struct eeh_dev *edev = NULL; 722 struct pnv_phb *phb = pdn->phb->private_data; 723 724 /* EEH not enabled ? */ 725 if (!(phb->flags & PNV_PHB_FLAG_EEH)) 726 return true; 727 728 /* PE reset or device removed ? */ 729 edev = pdn->edev; 730 if (edev) { 731 if (edev->pe && 732 (edev->pe->state & EEH_PE_CFG_BLOCKED)) 733 return false; 734 735 if (edev->mode & EEH_DEV_REMOVED) 736 return false; 737 } 738 739 return true; 740 } 741 #else 742 static inline pnv_pci_cfg_check(struct pci_dn *pdn) 743 { 744 return true; 745 } 746 #endif /* CONFIG_EEH */ 747 748 static int pnv_pci_read_config(struct pci_bus *bus, 749 unsigned int devfn, 750 int where, int size, u32 *val) 751 { 752 struct pci_dn *pdn; 753 struct pnv_phb *phb; 754 int ret; 755 756 *val = 0xFFFFFFFF; 757 pdn = pci_get_pdn_by_devfn(bus, devfn); 758 if (!pdn) 759 return PCIBIOS_DEVICE_NOT_FOUND; 760 761 if (!pnv_pci_cfg_check(pdn)) 762 return PCIBIOS_DEVICE_NOT_FOUND; 763 764 ret = pnv_pci_cfg_read(pdn, where, size, val); 765 phb = pdn->phb->private_data; 766 if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) { 767 if (*val == EEH_IO_ERROR_VALUE(size) && 768 eeh_dev_check_failure(pdn->edev)) 769 return PCIBIOS_DEVICE_NOT_FOUND; 770 } else { 771 pnv_pci_config_check_eeh(pdn); 772 } 773 774 return ret; 775 } 776 777 static int pnv_pci_write_config(struct pci_bus *bus, 778 unsigned int devfn, 779 int where, int size, u32 val) 780 { 781 struct pci_dn *pdn; 782 struct pnv_phb *phb; 783 int ret; 784 785 pdn = pci_get_pdn_by_devfn(bus, devfn); 786 if (!pdn) 787 return PCIBIOS_DEVICE_NOT_FOUND; 788 789 if (!pnv_pci_cfg_check(pdn)) 790 return PCIBIOS_DEVICE_NOT_FOUND; 791 792 ret = pnv_pci_cfg_write(pdn, where, size, val); 793 phb = pdn->phb->private_data; 794 if (!(phb->flags & PNV_PHB_FLAG_EEH)) 795 pnv_pci_config_check_eeh(pdn); 796 797 return ret; 798 } 799 800 struct pci_ops pnv_pci_ops = { 801 .read = pnv_pci_read_config, 802 .write = pnv_pci_write_config, 803 }; 804 805 struct iommu_table *pnv_pci_table_alloc(int nid) 806 { 807 struct iommu_table *tbl; 808 809 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid); 810 if (!tbl) 811 return NULL; 812 813 INIT_LIST_HEAD_RCU(&tbl->it_group_list); 814 kref_init(&tbl->it_kref); 815 816 return tbl; 817 } 818 819 void pnv_pci_dma_dev_setup(struct pci_dev *pdev) 820 { 821 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 822 struct pnv_phb *phb = hose->private_data; 823 #ifdef CONFIG_PCI_IOV 824 struct pnv_ioda_pe *pe; 825 struct pci_dn *pdn; 826 827 /* Fix the VF pdn PE number */ 828 if (pdev->is_virtfn) { 829 pdn = pci_get_pdn(pdev); 830 WARN_ON(pdn->pe_number != IODA_INVALID_PE); 831 list_for_each_entry(pe, &phb->ioda.pe_list, list) { 832 if (pe->rid == ((pdev->bus->number << 8) | 833 (pdev->devfn & 0xff))) { 834 pdn->pe_number = pe->pe_number; 835 pe->pdev = pdev; 836 break; 837 } 838 } 839 } 840 #endif /* CONFIG_PCI_IOV */ 841 842 if (phb && phb->dma_dev_setup) 843 phb->dma_dev_setup(phb, pdev); 844 } 845 846 void pnv_pci_dma_bus_setup(struct pci_bus *bus) 847 { 848 struct pci_controller *hose = bus->sysdata; 849 struct pnv_phb *phb = hose->private_data; 850 struct pnv_ioda_pe *pe; 851 852 list_for_each_entry(pe, &phb->ioda.pe_list, list) { 853 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))) 854 continue; 855 856 if (!pe->pbus) 857 continue; 858 859 if (bus->number == ((pe->rid >> 8) & 0xFF)) { 860 pe->pbus = bus; 861 break; 862 } 863 } 864 } 865 866 int pnv_pci_set_p2p(struct pci_dev *initiator, struct pci_dev *target, u64 desc) 867 { 868 struct pci_controller *hose; 869 struct pnv_phb *phb_init, *phb_target; 870 struct pnv_ioda_pe *pe_init; 871 int rc; 872 873 if (!opal_check_token(OPAL_PCI_SET_P2P)) 874 return -ENXIO; 875 876 hose = pci_bus_to_host(initiator->bus); 877 phb_init = hose->private_data; 878 879 hose = pci_bus_to_host(target->bus); 880 phb_target = hose->private_data; 881 882 pe_init = pnv_ioda_get_pe(initiator); 883 if (!pe_init) 884 return -ENODEV; 885 886 /* 887 * Configuring the initiator's PHB requires to adjust its 888 * TVE#1 setting. Since the same device can be an initiator 889 * several times for different target devices, we need to keep 890 * a reference count to know when we can restore the default 891 * bypass setting on its TVE#1 when disabling. Opal is not 892 * tracking PE states, so we add a reference count on the PE 893 * in linux. 894 * 895 * For the target, the configuration is per PHB, so we keep a 896 * target reference count on the PHB. 897 */ 898 mutex_lock(&p2p_mutex); 899 900 if (desc & OPAL_PCI_P2P_ENABLE) { 901 /* always go to opal to validate the configuration */ 902 rc = opal_pci_set_p2p(phb_init->opal_id, phb_target->opal_id, 903 desc, pe_init->pe_number); 904 905 if (rc != OPAL_SUCCESS) { 906 rc = -EIO; 907 goto out; 908 } 909 910 pe_init->p2p_initiator_count++; 911 phb_target->p2p_target_count++; 912 } else { 913 if (!pe_init->p2p_initiator_count || 914 !phb_target->p2p_target_count) { 915 rc = -EINVAL; 916 goto out; 917 } 918 919 if (--pe_init->p2p_initiator_count == 0) 920 pnv_pci_ioda2_set_bypass(pe_init, true); 921 922 if (--phb_target->p2p_target_count == 0) { 923 rc = opal_pci_set_p2p(phb_init->opal_id, 924 phb_target->opal_id, desc, 925 pe_init->pe_number); 926 if (rc != OPAL_SUCCESS) { 927 rc = -EIO; 928 goto out; 929 } 930 } 931 } 932 rc = 0; 933 out: 934 mutex_unlock(&p2p_mutex); 935 return rc; 936 } 937 EXPORT_SYMBOL_GPL(pnv_pci_set_p2p); 938 939 struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev) 940 { 941 struct pci_controller *hose = pci_bus_to_host(dev->bus); 942 943 return of_node_get(hose->dn); 944 } 945 EXPORT_SYMBOL(pnv_pci_get_phb_node); 946 947 int pnv_pci_enable_tunnel(struct pci_dev *dev, u64 *asnind) 948 { 949 struct device_node *np; 950 const __be32 *prop; 951 struct pnv_ioda_pe *pe; 952 uint16_t window_id; 953 int rc; 954 955 if (!radix_enabled()) 956 return -ENXIO; 957 958 if (!(np = pnv_pci_get_phb_node(dev))) 959 return -ENXIO; 960 961 prop = of_get_property(np, "ibm,phb-indications", NULL); 962 of_node_put(np); 963 964 if (!prop || !prop[1]) 965 return -ENXIO; 966 967 *asnind = (u64)be32_to_cpu(prop[1]); 968 pe = pnv_ioda_get_pe(dev); 969 if (!pe) 970 return -ENODEV; 971 972 /* Increase real window size to accept as_notify messages. */ 973 window_id = (pe->pe_number << 1 ) + 1; 974 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number, 975 window_id, pe->tce_bypass_base, 976 (uint64_t)1 << 48); 977 return opal_error_code(rc); 978 } 979 EXPORT_SYMBOL_GPL(pnv_pci_enable_tunnel); 980 981 int pnv_pci_disable_tunnel(struct pci_dev *dev) 982 { 983 struct pnv_ioda_pe *pe; 984 985 pe = pnv_ioda_get_pe(dev); 986 if (!pe) 987 return -ENODEV; 988 989 /* Restore default real window size. */ 990 pnv_pci_ioda2_set_bypass(pe, true); 991 return 0; 992 } 993 EXPORT_SYMBOL_GPL(pnv_pci_disable_tunnel); 994 995 int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) 996 { 997 __be64 val; 998 struct pci_controller *hose; 999 struct pnv_phb *phb; 1000 u64 tunnel_bar; 1001 int rc; 1002 1003 if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) 1004 return -ENXIO; 1005 if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) 1006 return -ENXIO; 1007 1008 hose = pci_bus_to_host(dev->bus); 1009 phb = hose->private_data; 1010 1011 mutex_lock(&tunnel_mutex); 1012 rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); 1013 if (rc != OPAL_SUCCESS) { 1014 rc = -EIO; 1015 goto out; 1016 } 1017 tunnel_bar = be64_to_cpu(val); 1018 if (enable) { 1019 /* 1020 * Only one device per PHB can use atomics. 1021 * Our policy is first-come, first-served. 1022 */ 1023 if (tunnel_bar) { 1024 if (tunnel_bar != addr) 1025 rc = -EBUSY; 1026 else 1027 rc = 0; /* Setting same address twice is ok */ 1028 goto out; 1029 } 1030 } else { 1031 /* 1032 * The device that owns atomics and wants to release 1033 * them must pass the same address with enable == 0. 1034 */ 1035 if (tunnel_bar != addr) { 1036 rc = -EPERM; 1037 goto out; 1038 } 1039 addr = 0x0ULL; 1040 } 1041 rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr); 1042 rc = opal_error_code(rc); 1043 out: 1044 mutex_unlock(&tunnel_mutex); 1045 return rc; 1046 } 1047 EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar); 1048 1049 #ifdef CONFIG_PPC64 /* for thread.tidr */ 1050 int pnv_pci_get_as_notify_info(struct task_struct *task, u32 *lpid, u32 *pid, 1051 u32 *tid) 1052 { 1053 struct mm_struct *mm = NULL; 1054 1055 if (task == NULL) 1056 return -EINVAL; 1057 1058 mm = get_task_mm(task); 1059 if (mm == NULL) 1060 return -EINVAL; 1061 1062 *pid = mm->context.id; 1063 mmput(mm); 1064 1065 *tid = task->thread.tidr; 1066 *lpid = mfspr(SPRN_LPID); 1067 return 0; 1068 } 1069 EXPORT_SYMBOL_GPL(pnv_pci_get_as_notify_info); 1070 #endif 1071 1072 void pnv_pci_shutdown(void) 1073 { 1074 struct pci_controller *hose; 1075 1076 list_for_each_entry(hose, &hose_list, list_node) 1077 if (hose->controller_ops.shutdown) 1078 hose->controller_ops.shutdown(hose); 1079 } 1080 1081 /* Fixup wrong class code in p7ioc and p8 root complex */ 1082 static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) 1083 { 1084 dev->class = PCI_CLASS_BRIDGE_PCI << 8; 1085 } 1086 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk); 1087 1088 void __init pnv_pci_init(void) 1089 { 1090 struct device_node *np; 1091 1092 pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN); 1093 1094 /* If we don't have OPAL, eg. in sim, just skip PCI probe */ 1095 if (!firmware_has_feature(FW_FEATURE_OPAL)) 1096 return; 1097 1098 /* Look for IODA IO-Hubs. */ 1099 for_each_compatible_node(np, NULL, "ibm,ioda-hub") { 1100 pnv_pci_init_ioda_hub(np); 1101 } 1102 1103 /* Look for ioda2 built-in PHB3's */ 1104 for_each_compatible_node(np, NULL, "ibm,ioda2-phb") 1105 pnv_pci_init_ioda2_phb(np); 1106 1107 /* Look for ioda3 built-in PHB4's, we treat them as IODA2 */ 1108 for_each_compatible_node(np, NULL, "ibm,ioda3-phb") 1109 pnv_pci_init_ioda2_phb(np); 1110 1111 /* Look for NPU PHBs */ 1112 for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb") 1113 pnv_pci_init_npu_phb(np); 1114 1115 /* 1116 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with 1117 * the exception of TCE kill which requires an OPAL call. 1118 */ 1119 for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb") 1120 pnv_pci_init_npu_phb(np); 1121 1122 /* Look for NPU2 OpenCAPI PHBs */ 1123 for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb") 1124 pnv_pci_init_npu2_opencapi_phb(np); 1125 1126 /* Configure IOMMU DMA hooks */ 1127 set_pci_dma_ops(&dma_iommu_ops); 1128 } 1129 1130 machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); 1131