1 /* 2 * Copyright 2006 Jake Moilanen <moilanen@austin.ibm.com>, IBM Corp. 3 * Copyright 2006-2007 Michael Ellerman, IBM Corp. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License 7 * as published by the Free Software Foundation; version 2 of the 8 * License. 9 * 10 */ 11 12 #include <linux/device.h> 13 #include <linux/irq.h> 14 #include <linux/msi.h> 15 16 #include <asm/rtas.h> 17 #include <asm/hw_irq.h> 18 #include <asm/ppc-pci.h> 19 #include <asm/machdep.h> 20 21 #include "pseries.h" 22 23 static int query_token, change_token; 24 25 #define RTAS_QUERY_FN 0 26 #define RTAS_CHANGE_FN 1 27 #define RTAS_RESET_FN 2 28 #define RTAS_CHANGE_MSI_FN 3 29 #define RTAS_CHANGE_MSIX_FN 4 30 #define RTAS_CHANGE_32MSI_FN 5 31 32 /* RTAS Helpers */ 33 34 static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs) 35 { 36 u32 addr, seq_num, rtas_ret[3]; 37 unsigned long buid; 38 int rc; 39 40 addr = rtas_config_addr(pdn->busno, pdn->devfn, 0); 41 buid = pdn->phb->buid; 42 43 seq_num = 1; 44 do { 45 if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN || 46 func == RTAS_CHANGE_32MSI_FN) 47 rc = rtas_call(change_token, 6, 4, rtas_ret, addr, 48 BUID_HI(buid), BUID_LO(buid), 49 func, num_irqs, seq_num); 50 else 51 rc = rtas_call(change_token, 6, 3, rtas_ret, addr, 52 BUID_HI(buid), BUID_LO(buid), 53 func, num_irqs, seq_num); 54 55 seq_num = rtas_ret[1]; 56 } while (rtas_busy_delay(rc)); 57 58 /* 59 * If the RTAS call succeeded, return the number of irqs allocated. 60 * If not, make sure we return a negative error code. 61 */ 62 if (rc == 0) 63 rc = rtas_ret[0]; 64 else if (rc > 0) 65 rc = -rc; 66 67 pr_debug("rtas_msi: ibm,change_msi(func=%d,num=%d), got %d rc = %d\n", 68 func, num_irqs, rtas_ret[0], rc); 69 70 return rc; 71 } 72 73 static void rtas_disable_msi(struct pci_dev *pdev) 74 { 75 struct pci_dn *pdn; 76 77 pdn = pci_get_pdn(pdev); 78 if (!pdn) 79 return; 80 81 /* 82 * disabling MSI with the explicit interface also disables MSI-X 83 */ 84 if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) { 85 /* 86 * may have failed because explicit interface is not 87 * present 88 */ 89 if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) { 90 pr_debug("rtas_msi: Setting MSIs to 0 failed!\n"); 91 } 92 } 93 } 94 95 static int rtas_query_irq_number(struct pci_dn *pdn, int offset) 96 { 97 u32 addr, rtas_ret[2]; 98 unsigned long buid; 99 int rc; 100 101 addr = rtas_config_addr(pdn->busno, pdn->devfn, 0); 102 buid = pdn->phb->buid; 103 104 do { 105 rc = rtas_call(query_token, 4, 3, rtas_ret, addr, 106 BUID_HI(buid), BUID_LO(buid), offset); 107 } while (rtas_busy_delay(rc)); 108 109 if (rc) { 110 pr_debug("rtas_msi: error (%d) querying source number\n", rc); 111 return rc; 112 } 113 114 return rtas_ret[0]; 115 } 116 117 static void rtas_teardown_msi_irqs(struct pci_dev *pdev) 118 { 119 struct msi_desc *entry; 120 121 for_each_pci_msi_entry(entry, pdev) { 122 if (!entry->irq) 123 continue; 124 125 irq_set_msi_desc(entry->irq, NULL); 126 irq_dispose_mapping(entry->irq); 127 } 128 129 rtas_disable_msi(pdev); 130 } 131 132 static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) 133 { 134 struct device_node *dn; 135 const __be32 *p; 136 u32 req_msi; 137 138 dn = pci_device_to_OF_node(pdev); 139 140 p = of_get_property(dn, prop_name, NULL); 141 if (!p) { 142 pr_debug("rtas_msi: No %s on %pOF\n", prop_name, dn); 143 return -ENOENT; 144 } 145 146 req_msi = be32_to_cpup(p); 147 if (req_msi < nvec) { 148 pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); 149 150 if (req_msi == 0) /* Be paranoid */ 151 return -ENOSPC; 152 153 return req_msi; 154 } 155 156 return 0; 157 } 158 159 static int check_req_msi(struct pci_dev *pdev, int nvec) 160 { 161 return check_req(pdev, nvec, "ibm,req#msi"); 162 } 163 164 static int check_req_msix(struct pci_dev *pdev, int nvec) 165 { 166 return check_req(pdev, nvec, "ibm,req#msi-x"); 167 } 168 169 /* Quota calculation */ 170 171 static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) 172 { 173 struct device_node *dn; 174 const __be32 *p; 175 176 dn = of_node_get(pci_device_to_OF_node(dev)); 177 while (dn) { 178 p = of_get_property(dn, "ibm,pe-total-#msi", NULL); 179 if (p) { 180 pr_debug("rtas_msi: found prop on dn %pOF\n", 181 dn); 182 *total = be32_to_cpup(p); 183 return dn; 184 } 185 186 dn = of_get_next_parent(dn); 187 } 188 189 return NULL; 190 } 191 192 static struct device_node *find_pe_dn(struct pci_dev *dev, int *total) 193 { 194 struct device_node *dn; 195 struct eeh_dev *edev; 196 197 /* Found our PE and assume 8 at that point. */ 198 199 dn = pci_device_to_OF_node(dev); 200 if (!dn) 201 return NULL; 202 203 /* Get the top level device in the PE */ 204 edev = pdn_to_eeh_dev(PCI_DN(dn)); 205 if (edev->pe) 206 edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list); 207 dn = pci_device_to_OF_node(edev->pdev); 208 if (!dn) 209 return NULL; 210 211 /* We actually want the parent */ 212 dn = of_get_parent(dn); 213 if (!dn) 214 return NULL; 215 216 /* Hardcode of 8 for old firmwares */ 217 *total = 8; 218 pr_debug("rtas_msi: using PE dn %pOF\n", dn); 219 220 return dn; 221 } 222 223 struct msi_counts { 224 struct device_node *requestor; 225 int num_devices; 226 int request; 227 int quota; 228 int spare; 229 int over_quota; 230 }; 231 232 static void *count_non_bridge_devices(struct device_node *dn, void *data) 233 { 234 struct msi_counts *counts = data; 235 const __be32 *p; 236 u32 class; 237 238 pr_debug("rtas_msi: counting %pOF\n", dn); 239 240 p = of_get_property(dn, "class-code", NULL); 241 class = p ? be32_to_cpup(p) : 0; 242 243 if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) 244 counts->num_devices++; 245 246 return NULL; 247 } 248 249 static void *count_spare_msis(struct device_node *dn, void *data) 250 { 251 struct msi_counts *counts = data; 252 const __be32 *p; 253 int req; 254 255 if (dn == counts->requestor) 256 req = counts->request; 257 else { 258 /* We don't know if a driver will try to use MSI or MSI-X, 259 * so we just have to punt and use the larger of the two. */ 260 req = 0; 261 p = of_get_property(dn, "ibm,req#msi", NULL); 262 if (p) 263 req = be32_to_cpup(p); 264 265 p = of_get_property(dn, "ibm,req#msi-x", NULL); 266 if (p) 267 req = max(req, (int)be32_to_cpup(p)); 268 } 269 270 if (req < counts->quota) 271 counts->spare += counts->quota - req; 272 else if (req > counts->quota) 273 counts->over_quota++; 274 275 return NULL; 276 } 277 278 static int msi_quota_for_device(struct pci_dev *dev, int request) 279 { 280 struct device_node *pe_dn; 281 struct msi_counts counts; 282 int total; 283 284 pr_debug("rtas_msi: calc quota for %s, request %d\n", pci_name(dev), 285 request); 286 287 pe_dn = find_pe_total_msi(dev, &total); 288 if (!pe_dn) 289 pe_dn = find_pe_dn(dev, &total); 290 291 if (!pe_dn) { 292 pr_err("rtas_msi: couldn't find PE for %s\n", pci_name(dev)); 293 goto out; 294 } 295 296 pr_debug("rtas_msi: found PE %pOF\n", pe_dn); 297 298 memset(&counts, 0, sizeof(struct msi_counts)); 299 300 /* Work out how many devices we have below this PE */ 301 pci_traverse_device_nodes(pe_dn, count_non_bridge_devices, &counts); 302 303 if (counts.num_devices == 0) { 304 pr_err("rtas_msi: found 0 devices under PE for %s\n", 305 pci_name(dev)); 306 goto out; 307 } 308 309 counts.quota = total / counts.num_devices; 310 if (request <= counts.quota) 311 goto out; 312 313 /* else, we have some more calculating to do */ 314 counts.requestor = pci_device_to_OF_node(dev); 315 counts.request = request; 316 pci_traverse_device_nodes(pe_dn, count_spare_msis, &counts); 317 318 /* If the quota isn't an integer multiple of the total, we can 319 * use the remainder as spare MSIs for anyone that wants them. */ 320 counts.spare += total % counts.num_devices; 321 322 /* Divide any spare by the number of over-quota requestors */ 323 if (counts.over_quota) 324 counts.quota += counts.spare / counts.over_quota; 325 326 /* And finally clamp the request to the possibly adjusted quota */ 327 request = min(counts.quota, request); 328 329 pr_debug("rtas_msi: request clamped to quota %d\n", request); 330 out: 331 of_node_put(pe_dn); 332 333 return request; 334 } 335 336 static int check_msix_entries(struct pci_dev *pdev) 337 { 338 struct msi_desc *entry; 339 int expected; 340 341 /* There's no way for us to express to firmware that we want 342 * a discontiguous, or non-zero based, range of MSI-X entries. 343 * So we must reject such requests. */ 344 345 expected = 0; 346 for_each_pci_msi_entry(entry, pdev) { 347 if (entry->msi_attrib.entry_nr != expected) { 348 pr_debug("rtas_msi: bad MSI-X entries.\n"); 349 return -EINVAL; 350 } 351 expected++; 352 } 353 354 return 0; 355 } 356 357 static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev) 358 { 359 u32 addr_hi, addr_lo; 360 361 /* 362 * We should only get in here for IODA1 configs. This is based on the 363 * fact that we using RTAS for MSIs, we don't have the 32 bit MSI RTAS 364 * support, and we are in a PCIe Gen2 slot. 365 */ 366 dev_info(&pdev->dev, 367 "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n"); 368 pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi); 369 addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4); 370 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo); 371 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0); 372 } 373 374 static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) 375 { 376 struct pci_dn *pdn; 377 int hwirq, virq, i, quota, rc; 378 struct msi_desc *entry; 379 struct msi_msg msg; 380 int nvec = nvec_in; 381 int use_32bit_msi_hack = 0; 382 383 if (type == PCI_CAP_ID_MSIX) 384 rc = check_req_msix(pdev, nvec); 385 else 386 rc = check_req_msi(pdev, nvec); 387 388 if (rc) 389 return rc; 390 391 quota = msi_quota_for_device(pdev, nvec); 392 393 if (quota && quota < nvec) 394 return quota; 395 396 if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev)) 397 return -EINVAL; 398 399 /* 400 * Firmware currently refuse any non power of two allocation 401 * so we round up if the quota will allow it. 402 */ 403 if (type == PCI_CAP_ID_MSIX) { 404 int m = roundup_pow_of_two(nvec); 405 quota = msi_quota_for_device(pdev, m); 406 407 if (quota >= m) 408 nvec = m; 409 } 410 411 pdn = pci_get_pdn(pdev); 412 413 /* 414 * Try the new more explicit firmware interface, if that fails fall 415 * back to the old interface. The old interface is known to never 416 * return MSI-Xs. 417 */ 418 again: 419 if (type == PCI_CAP_ID_MSI) { 420 if (pdev->no_64bit_msi) { 421 rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); 422 if (rc < 0) { 423 /* 424 * We only want to run the 32 bit MSI hack below if 425 * the max bus speed is Gen2 speed 426 */ 427 if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) 428 return rc; 429 430 use_32bit_msi_hack = 1; 431 } 432 } else 433 rc = -1; 434 435 if (rc < 0) 436 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); 437 438 if (rc < 0) { 439 pr_debug("rtas_msi: trying the old firmware call.\n"); 440 rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec); 441 } 442 443 if (use_32bit_msi_hack && rc > 0) 444 rtas_hack_32bit_msi_gen2(pdev); 445 } else 446 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); 447 448 if (rc != nvec) { 449 if (nvec != nvec_in) { 450 nvec = nvec_in; 451 goto again; 452 } 453 pr_debug("rtas_msi: rtas_change_msi() failed\n"); 454 return rc; 455 } 456 457 i = 0; 458 for_each_pci_msi_entry(entry, pdev) { 459 hwirq = rtas_query_irq_number(pdn, i++); 460 if (hwirq < 0) { 461 pr_debug("rtas_msi: error (%d) getting hwirq\n", rc); 462 return hwirq; 463 } 464 465 virq = irq_create_mapping(NULL, hwirq); 466 467 if (!virq) { 468 pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq); 469 return -ENOSPC; 470 } 471 472 dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); 473 irq_set_msi_desc(virq, entry); 474 475 /* Read config space back so we can restore after reset */ 476 __pci_read_msi_msg(entry, &msg); 477 entry->msg = msg; 478 } 479 480 return 0; 481 } 482 483 static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev) 484 { 485 /* No LSI -> leave MSIs (if any) configured */ 486 if (!pdev->irq) { 487 dev_dbg(&pdev->dev, "rtas_msi: no LSI, nothing to do.\n"); 488 return; 489 } 490 491 /* No MSI -> MSIs can't have been assigned by fw, leave LSI */ 492 if (check_req_msi(pdev, 1) && check_req_msix(pdev, 1)) { 493 dev_dbg(&pdev->dev, "rtas_msi: no req#msi/x, nothing to do.\n"); 494 return; 495 } 496 497 dev_dbg(&pdev->dev, "rtas_msi: disabling existing MSI.\n"); 498 rtas_disable_msi(pdev); 499 } 500 501 static int rtas_msi_init(void) 502 { 503 struct pci_controller *phb; 504 505 query_token = rtas_token("ibm,query-interrupt-source-number"); 506 change_token = rtas_token("ibm,change-msi"); 507 508 if ((query_token == RTAS_UNKNOWN_SERVICE) || 509 (change_token == RTAS_UNKNOWN_SERVICE)) { 510 pr_debug("rtas_msi: no RTAS tokens, no MSI support.\n"); 511 return -1; 512 } 513 514 pr_debug("rtas_msi: Registering RTAS MSI callbacks.\n"); 515 516 WARN_ON(pseries_pci_controller_ops.setup_msi_irqs); 517 pseries_pci_controller_ops.setup_msi_irqs = rtas_setup_msi_irqs; 518 pseries_pci_controller_ops.teardown_msi_irqs = rtas_teardown_msi_irqs; 519 520 list_for_each_entry(phb, &hose_list, list_node) { 521 WARN_ON(phb->controller_ops.setup_msi_irqs); 522 phb->controller_ops.setup_msi_irqs = rtas_setup_msi_irqs; 523 phb->controller_ops.teardown_msi_irqs = rtas_teardown_msi_irqs; 524 } 525 526 WARN_ON(ppc_md.pci_irq_fixup); 527 ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup; 528 529 return 0; 530 } 531 machine_arch_initcall(pseries, rtas_msi_init); 532