1 /* 2 * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. 3 * 4 * Author: Tony Li <tony.li@freescale.com> 5 * Jason Jin <Jason.jin@freescale.com> 6 * 7 * The hwirq alloc and free code reuse from sysdev/mpic_msi.c 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; version 2 of the 12 * License. 13 * 14 */ 15 #include <linux/irq.h> 16 #include <linux/msi.h> 17 #include <linux/pci.h> 18 #include <linux/slab.h> 19 #include <linux/of_platform.h> 20 #include <linux/interrupt.h> 21 #include <linux/seq_file.h> 22 #include <sysdev/fsl_soc.h> 23 #include <asm/prom.h> 24 #include <asm/hw_irq.h> 25 #include <asm/ppc-pci.h> 26 #include <asm/mpic.h> 27 #include <asm/fsl_hcalls.h> 28 29 #include "fsl_msi.h" 30 #include "fsl_pci.h" 31 32 #define MSIIR_OFFSET_MASK 0xfffff 33 #define MSIIR_IBS_SHIFT 0 34 #define MSIIR_SRS_SHIFT 5 35 #define MSIIR1_IBS_SHIFT 4 36 #define MSIIR1_SRS_SHIFT 0 37 #define MSI_SRS_MASK 0xf 38 #define MSI_IBS_MASK 0x1f 39 40 #define msi_hwirq(msi, msir_index, intr_index) \ 41 ((msir_index) << (msi)->srs_shift | \ 42 ((intr_index) << (msi)->ibs_shift)) 43 44 static LIST_HEAD(msi_head); 45 46 struct fsl_msi_feature { 47 u32 fsl_pic_ip; 48 u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */ 49 }; 50 51 struct fsl_msi_cascade_data { 52 struct fsl_msi *msi_data; 53 int index; 54 int virq; 55 }; 56 57 static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg) 58 { 59 return in_be32(base + (reg >> 2)); 60 } 61 62 /* 63 * We do not need this actually. The MSIR register has been read once 64 * in the cascade interrupt. So, this MSI interrupt has been acked 65 */ 66 static void fsl_msi_end_irq(struct irq_data *d) 67 { 68 } 69 70 static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) 71 { 72 struct fsl_msi *msi_data = irqd->domain->host_data; 73 irq_hw_number_t hwirq = irqd_to_hwirq(irqd); 74 int cascade_virq, srs; 75 76 srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK; 77 cascade_virq = msi_data->cascade_array[srs]->virq; 78 79 seq_printf(p, " fsl-msi-%d", cascade_virq); 80 } 81 82 83 static struct irq_chip fsl_msi_chip = { 84 .irq_mask = pci_msi_mask_irq, 85 .irq_unmask = pci_msi_unmask_irq, 86 .irq_ack = fsl_msi_end_irq, 87 .irq_print_chip = fsl_msi_print_chip, 88 }; 89 90 static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq, 91 irq_hw_number_t hw) 92 { 93 struct fsl_msi *msi_data = h->host_data; 94 struct irq_chip *chip = &fsl_msi_chip; 95 96 irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING); 97 98 irq_set_chip_data(virq, msi_data); 99 irq_set_chip_and_handler(virq, chip, handle_edge_irq); 100 101 return 0; 102 } 103 104 static const struct irq_domain_ops fsl_msi_host_ops = { 105 .map = fsl_msi_host_map, 106 }; 107 108 static int fsl_msi_init_allocator(struct fsl_msi *msi_data) 109 { 110 int rc, hwirq; 111 112 rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX, 113 msi_data->irqhost->of_node); 114 if (rc) 115 return rc; 116 117 /* 118 * Reserve all the hwirqs 119 * The available hwirqs will be released in fsl_msi_setup_hwirq() 120 */ 121 for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++) 122 msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq); 123 124 return 0; 125 } 126 127 static void fsl_teardown_msi_irqs(struct pci_dev *pdev) 128 { 129 struct msi_desc *entry; 130 struct fsl_msi *msi_data; 131 132 list_for_each_entry(entry, &pdev->msi_list, list) { 133 if (entry->irq == NO_IRQ) 134 continue; 135 msi_data = irq_get_chip_data(entry->irq); 136 irq_set_msi_desc(entry->irq, NULL); 137 msi_bitmap_free_hwirqs(&msi_data->bitmap, 138 virq_to_hw(entry->irq), 1); 139 irq_dispose_mapping(entry->irq); 140 } 141 142 return; 143 } 144 145 static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq, 146 struct msi_msg *msg, 147 struct fsl_msi *fsl_msi_data) 148 { 149 struct fsl_msi *msi_data = fsl_msi_data; 150 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 151 u64 address; /* Physical address of the MSIIR */ 152 int len; 153 const __be64 *reg; 154 155 /* If the msi-address-64 property exists, then use it */ 156 reg = of_get_property(hose->dn, "msi-address-64", &len); 157 if (reg && (len == sizeof(u64))) 158 address = be64_to_cpup(reg); 159 else 160 address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset; 161 162 msg->address_lo = lower_32_bits(address); 163 msg->address_hi = upper_32_bits(address); 164 165 msg->data = hwirq; 166 167 pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__, 168 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK, 169 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK); 170 } 171 172 static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 173 { 174 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 175 struct device_node *np; 176 phandle phandle = 0; 177 int rc, hwirq = -ENOMEM; 178 unsigned int virq; 179 struct msi_desc *entry; 180 struct msi_msg msg; 181 struct fsl_msi *msi_data; 182 183 if (type == PCI_CAP_ID_MSIX) 184 pr_debug("fslmsi: MSI-X untested, trying anyway.\n"); 185 186 /* 187 * If the PCI node has an fsl,msi property, then we need to use it 188 * to find the specific MSI. 189 */ 190 np = of_parse_phandle(hose->dn, "fsl,msi", 0); 191 if (np) { 192 if (of_device_is_compatible(np, "fsl,mpic-msi") || 193 of_device_is_compatible(np, "fsl,vmpic-msi") || 194 of_device_is_compatible(np, "fsl,vmpic-msi-v4.3")) 195 phandle = np->phandle; 196 else { 197 dev_err(&pdev->dev, 198 "node %s has an invalid fsl,msi phandle %u\n", 199 hose->dn->full_name, np->phandle); 200 return -EINVAL; 201 } 202 } 203 204 list_for_each_entry(entry, &pdev->msi_list, list) { 205 /* 206 * Loop over all the MSI devices until we find one that has an 207 * available interrupt. 208 */ 209 list_for_each_entry(msi_data, &msi_head, list) { 210 /* 211 * If the PCI node has an fsl,msi property, then we 212 * restrict our search to the corresponding MSI node. 213 * The simplest way is to skip over MSI nodes with the 214 * wrong phandle. Under the Freescale hypervisor, this 215 * has the additional benefit of skipping over MSI 216 * nodes that are not mapped in the PAMU. 217 */ 218 if (phandle && (phandle != msi_data->phandle)) 219 continue; 220 221 hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); 222 if (hwirq >= 0) 223 break; 224 } 225 226 if (hwirq < 0) { 227 rc = hwirq; 228 dev_err(&pdev->dev, "could not allocate MSI interrupt\n"); 229 goto out_free; 230 } 231 232 virq = irq_create_mapping(msi_data->irqhost, hwirq); 233 234 if (virq == NO_IRQ) { 235 dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq); 236 msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); 237 rc = -ENOSPC; 238 goto out_free; 239 } 240 /* chip_data is msi_data via host->hostdata in host->map() */ 241 irq_set_msi_desc(virq, entry); 242 243 fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); 244 pci_write_msi_msg(virq, &msg); 245 } 246 return 0; 247 248 out_free: 249 /* free by the caller of this function */ 250 return rc; 251 } 252 253 static irqreturn_t fsl_msi_cascade(int irq, void *data) 254 { 255 unsigned int cascade_irq; 256 struct fsl_msi *msi_data; 257 int msir_index = -1; 258 u32 msir_value = 0; 259 u32 intr_index; 260 u32 have_shift = 0; 261 struct fsl_msi_cascade_data *cascade_data = data; 262 irqreturn_t ret = IRQ_NONE; 263 264 msi_data = cascade_data->msi_data; 265 266 msir_index = cascade_data->index; 267 268 if (msir_index >= NR_MSI_REG_MAX) 269 cascade_irq = NO_IRQ; 270 271 switch (msi_data->feature & FSL_PIC_IP_MASK) { 272 case FSL_PIC_IP_MPIC: 273 msir_value = fsl_msi_read(msi_data->msi_regs, 274 msir_index * 0x10); 275 break; 276 case FSL_PIC_IP_IPIC: 277 msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4); 278 break; 279 #ifdef CONFIG_EPAPR_PARAVIRT 280 case FSL_PIC_IP_VMPIC: { 281 unsigned int ret; 282 ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value); 283 if (ret) { 284 pr_err("fsl-msi: fh_vmpic_get_msir() failed for " 285 "irq %u (ret=%u)\n", irq, ret); 286 msir_value = 0; 287 } 288 break; 289 } 290 #endif 291 } 292 293 while (msir_value) { 294 intr_index = ffs(msir_value) - 1; 295 296 cascade_irq = irq_linear_revmap(msi_data->irqhost, 297 msi_hwirq(msi_data, msir_index, 298 intr_index + have_shift)); 299 if (cascade_irq != NO_IRQ) { 300 generic_handle_irq(cascade_irq); 301 ret = IRQ_HANDLED; 302 } 303 have_shift += intr_index + 1; 304 msir_value = msir_value >> (intr_index + 1); 305 } 306 307 return ret; 308 } 309 310 static int fsl_of_msi_remove(struct platform_device *ofdev) 311 { 312 struct fsl_msi *msi = platform_get_drvdata(ofdev); 313 int virq, i; 314 315 if (msi->list.prev != NULL) 316 list_del(&msi->list); 317 for (i = 0; i < NR_MSI_REG_MAX; i++) { 318 if (msi->cascade_array[i]) { 319 virq = msi->cascade_array[i]->virq; 320 321 BUG_ON(virq == NO_IRQ); 322 323 free_irq(virq, msi->cascade_array[i]); 324 kfree(msi->cascade_array[i]); 325 irq_dispose_mapping(virq); 326 } 327 } 328 if (msi->bitmap.bitmap) 329 msi_bitmap_free(&msi->bitmap); 330 if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) 331 iounmap(msi->msi_regs); 332 kfree(msi); 333 334 return 0; 335 } 336 337 static struct lock_class_key fsl_msi_irq_class; 338 339 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, 340 int offset, int irq_index) 341 { 342 struct fsl_msi_cascade_data *cascade_data = NULL; 343 int virt_msir, i, ret; 344 345 virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index); 346 if (virt_msir == NO_IRQ) { 347 dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n", 348 __func__, irq_index); 349 return 0; 350 } 351 352 cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL); 353 if (!cascade_data) { 354 dev_err(&dev->dev, "No memory for MSI cascade data\n"); 355 return -ENOMEM; 356 } 357 irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); 358 cascade_data->index = offset; 359 cascade_data->msi_data = msi; 360 cascade_data->virq = virt_msir; 361 msi->cascade_array[irq_index] = cascade_data; 362 363 ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD, 364 "fsl-msi-cascade", cascade_data); 365 if (ret) { 366 dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n", 367 virt_msir, ret); 368 return ret; 369 } 370 371 /* Release the hwirqs corresponding to this MSI register */ 372 for (i = 0; i < IRQS_PER_MSI_REG; i++) 373 msi_bitmap_free_hwirqs(&msi->bitmap, 374 msi_hwirq(msi, offset, i), 1); 375 376 return 0; 377 } 378 379 static const struct of_device_id fsl_of_msi_ids[]; 380 static int fsl_of_msi_probe(struct platform_device *dev) 381 { 382 const struct of_device_id *match; 383 struct fsl_msi *msi; 384 struct resource res, msiir; 385 int err, i, j, irq_index, count; 386 const u32 *p; 387 const struct fsl_msi_feature *features; 388 int len; 389 u32 offset; 390 391 match = of_match_device(fsl_of_msi_ids, &dev->dev); 392 if (!match) 393 return -EINVAL; 394 features = match->data; 395 396 printk(KERN_DEBUG "Setting up Freescale MSI support\n"); 397 398 msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL); 399 if (!msi) { 400 dev_err(&dev->dev, "No memory for MSI structure\n"); 401 return -ENOMEM; 402 } 403 platform_set_drvdata(dev, msi); 404 405 msi->irqhost = irq_domain_add_linear(dev->dev.of_node, 406 NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi); 407 408 if (msi->irqhost == NULL) { 409 dev_err(&dev->dev, "No memory for MSI irqhost\n"); 410 err = -ENOMEM; 411 goto error_out; 412 } 413 414 /* 415 * Under the Freescale hypervisor, the msi nodes don't have a 'reg' 416 * property. Instead, we use hypercalls to access the MSI. 417 */ 418 if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) { 419 err = of_address_to_resource(dev->dev.of_node, 0, &res); 420 if (err) { 421 dev_err(&dev->dev, "invalid resource for node %s\n", 422 dev->dev.of_node->full_name); 423 goto error_out; 424 } 425 426 msi->msi_regs = ioremap(res.start, resource_size(&res)); 427 if (!msi->msi_regs) { 428 err = -ENOMEM; 429 dev_err(&dev->dev, "could not map node %s\n", 430 dev->dev.of_node->full_name); 431 goto error_out; 432 } 433 msi->msiir_offset = 434 features->msiir_offset + (res.start & 0xfffff); 435 436 /* 437 * First read the MSIIR/MSIIR1 offset from dts 438 * On failure use the hardcode MSIIR offset 439 */ 440 if (of_address_to_resource(dev->dev.of_node, 1, &msiir)) 441 msi->msiir_offset = features->msiir_offset + 442 (res.start & MSIIR_OFFSET_MASK); 443 else 444 msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK; 445 } 446 447 msi->feature = features->fsl_pic_ip; 448 449 /* 450 * Remember the phandle, so that we can match with any PCI nodes 451 * that have an "fsl,msi" property. 452 */ 453 msi->phandle = dev->dev.of_node->phandle; 454 455 err = fsl_msi_init_allocator(msi); 456 if (err) { 457 dev_err(&dev->dev, "Error allocating MSI bitmap\n"); 458 goto error_out; 459 } 460 461 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); 462 463 if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") || 464 of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) { 465 msi->srs_shift = MSIIR1_SRS_SHIFT; 466 msi->ibs_shift = MSIIR1_IBS_SHIFT; 467 if (p) 468 dev_warn(&dev->dev, "%s: dose not support msi-available-ranges property\n", 469 __func__); 470 471 for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1; 472 irq_index++) { 473 err = fsl_msi_setup_hwirq(msi, dev, 474 irq_index, irq_index); 475 if (err) 476 goto error_out; 477 } 478 } else { 479 static const u32 all_avail[] = 480 { 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG }; 481 482 msi->srs_shift = MSIIR_SRS_SHIFT; 483 msi->ibs_shift = MSIIR_IBS_SHIFT; 484 485 if (p && len % (2 * sizeof(u32)) != 0) { 486 dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n", 487 __func__); 488 err = -EINVAL; 489 goto error_out; 490 } 491 492 if (!p) { 493 p = all_avail; 494 len = sizeof(all_avail); 495 } 496 497 for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) { 498 if (p[i * 2] % IRQS_PER_MSI_REG || 499 p[i * 2 + 1] % IRQS_PER_MSI_REG) { 500 pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n", 501 __func__, dev->dev.of_node->full_name, 502 p[i * 2 + 1], p[i * 2]); 503 err = -EINVAL; 504 goto error_out; 505 } 506 507 offset = p[i * 2] / IRQS_PER_MSI_REG; 508 count = p[i * 2 + 1] / IRQS_PER_MSI_REG; 509 510 for (j = 0; j < count; j++, irq_index++) { 511 err = fsl_msi_setup_hwirq(msi, dev, offset + j, 512 irq_index); 513 if (err) 514 goto error_out; 515 } 516 } 517 } 518 519 list_add_tail(&msi->list, &msi_head); 520 521 /* The multiple setting ppc_md.setup_msi_irqs will not harm things */ 522 if (!ppc_md.setup_msi_irqs) { 523 ppc_md.setup_msi_irqs = fsl_setup_msi_irqs; 524 ppc_md.teardown_msi_irqs = fsl_teardown_msi_irqs; 525 } else if (ppc_md.setup_msi_irqs != fsl_setup_msi_irqs) { 526 dev_err(&dev->dev, "Different MSI driver already installed!\n"); 527 err = -ENODEV; 528 goto error_out; 529 } 530 return 0; 531 error_out: 532 fsl_of_msi_remove(dev); 533 return err; 534 } 535 536 static const struct fsl_msi_feature mpic_msi_feature = { 537 .fsl_pic_ip = FSL_PIC_IP_MPIC, 538 .msiir_offset = 0x140, 539 }; 540 541 static const struct fsl_msi_feature ipic_msi_feature = { 542 .fsl_pic_ip = FSL_PIC_IP_IPIC, 543 .msiir_offset = 0x38, 544 }; 545 546 static const struct fsl_msi_feature vmpic_msi_feature = { 547 .fsl_pic_ip = FSL_PIC_IP_VMPIC, 548 .msiir_offset = 0, 549 }; 550 551 static const struct of_device_id fsl_of_msi_ids[] = { 552 { 553 .compatible = "fsl,mpic-msi", 554 .data = &mpic_msi_feature, 555 }, 556 { 557 .compatible = "fsl,mpic-msi-v4.3", 558 .data = &mpic_msi_feature, 559 }, 560 { 561 .compatible = "fsl,ipic-msi", 562 .data = &ipic_msi_feature, 563 }, 564 #ifdef CONFIG_EPAPR_PARAVIRT 565 { 566 .compatible = "fsl,vmpic-msi", 567 .data = &vmpic_msi_feature, 568 }, 569 { 570 .compatible = "fsl,vmpic-msi-v4.3", 571 .data = &vmpic_msi_feature, 572 }, 573 #endif 574 {} 575 }; 576 577 static struct platform_driver fsl_of_msi_driver = { 578 .driver = { 579 .name = "fsl-msi", 580 .of_match_table = fsl_of_msi_ids, 581 }, 582 .probe = fsl_of_msi_probe, 583 .remove = fsl_of_msi_remove, 584 }; 585 586 static __init int fsl_of_msi_init(void) 587 { 588 return platform_driver_register(&fsl_of_msi_driver); 589 } 590 591 subsys_initcall(fsl_of_msi_init); 592