// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Broadcom Corporation
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT        11
#define IPROC_MSI_INTR_EN              BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT    1
#define IPROC_MSI_INT_N_EVENT          BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT          0
#define IPROC_MSI_EQ_EN                BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK              0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS                     6

/* Number of entries in each event queue */
#define EQ_LEN                         64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE             SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE            SZ_4K

enum iproc_msi_reg {
        IPROC_MSI_EQ_PAGE = 0,
        IPROC_MSI_EQ_PAGE_UPPER,
        IPROC_MSI_PAGE,
        IPROC_MSI_PAGE_UPPER,
        IPROC_MSI_CTRL,
        IPROC_MSI_EQ_HEAD,
        IPROC_MSI_EQ_TAIL,
        IPROC_MSI_INTS_EN,
        IPROC_MSI_REG_SIZE,
};

struct iproc_msi;

/**
 * struct iproc_msi_grp - iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: Event queue number
 */
struct iproc_msi_grp {
        struct iproc_msi *msi;
        int gic_irq;
        unsigned int eq;
};

/**
 * struct iproc_msi - iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 *   set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 *   queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 *   posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
        struct iproc_pcie *pcie;
        const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
        struct iproc_msi_grp *grps;
        int nr_irqs;
        int nr_cpus;
        bool has_inten_reg;
        unsigned long *bitmap;
        struct mutex bitmap_lock;
        unsigned int nr_msi_vecs;
        struct irq_domain *inner_domain;
        unsigned int nr_eq_region;
        unsigned int nr_msi_region;
        void *eq_cpu;
        dma_addr_t eq_dma;
        phys_addr_t msi_addr;
};

static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
        { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
        { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
        { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};

static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
                                     enum iproc_msi_reg reg,
                                     unsigned int eq)
{
        struct iproc_pcie *pcie = msi->pcie;

        return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
                                       enum iproc_msi_reg reg,
                                       int eq, u32 val)
{
        struct iproc_pcie *pcie = msi->pcie;

        writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_irqs);
}

static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
                                                 unsigned long hwirq)
{
        if (msi->nr_msi_region > 1)
                return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
        else
                return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
        if (msi->nr_eq_region > 1)
                return eq * EQ_MEM_REGION_SIZE;
        else
                return eq * EQ_LEN * sizeof(u32);
}

#define IPROC_MSI_FLAGS_REQUIRED   (MSI_FLAG_USE_DEF_DOM_OPS | \
                                    MSI_FLAG_USE_DEF_CHIP_OPS)
#define IPROC_MSI_FLAGS_SUPPORTED  (MSI_GENERIC_FLAGS_MASK | \
                                    MSI_FLAG_PCI_MSIX)

static struct msi_parent_ops iproc_msi_parent_ops = {
        .required_flags         = IPROC_MSI_FLAGS_REQUIRED,
        .supported_flags        = IPROC_MSI_FLAGS_SUPPORTED,
        .bus_select_token       = DOMAIN_BUS_PCI_MSI,
        .prefix                 = "iProc-",
        .init_dev_msi_info      = msi_lib_init_dev_msi_info,
};

/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue.  Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs.  The total
 * number of CPU cores also varies.  To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs.  An MSI vector is
 * moved from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */

static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
                                                     unsigned long hwirq)
{
        return (hwirq - hwirq_to_cpu(msi, hwirq));
}

static int iproc_msi_irq_set_affinity(struct irq_data *data,
                                      const struct cpumask *mask, bool force)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        int target_cpu = cpumask_first(mask);
        int curr_cpu;
        int ret;

        curr_cpu = hwirq_to_cpu(msi, data->hwirq);
        if (curr_cpu == target_cpu)
                ret = IRQ_SET_MASK_OK_DONE;
        else {
                /* steer MSI to the target CPU */
                data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
                ret = IRQ_SET_MASK_OK;
        }

        irq_data_update_effective_affinity(data, cpumask_of(target_cpu));

        return ret;
}

static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        dma_addr_t addr;

        addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
        msg->address_lo = lower_32_bits(addr);
        msg->address_hi = upper_32_bits(addr);
        msg->data = data->hwirq << 5;
}
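
/*
 * Descriptive note on the encoding above: the hwirq is programmed into the
 * MSI data left-shifted by 5, leaving the low 5 bits clear.  With multi
 * MSI (MSI_FLAG_MULTI_PCI_MSI, only enabled for single-CPU systems below),
 * an endpoint puts its per-vector index in those low bits.
 * decode_msi_hwirq() reverses this by computing
 * (data >> 5) + (data & 0x1f) to recover the hwirq that was signalled.
 */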

static struct irq_chip iproc_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_set_affinity = iproc_msi_irq_set_affinity,
        .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs,
                                      void *args)
{
        struct iproc_msi *msi = domain->host_data;
        int hwirq, i;

        if (msi->nr_cpus > 1 && nr_irqs > 1)
                return -EINVAL;

        mutex_lock(&msi->bitmap_lock);

        /*
         * Allocate 'nr_irqs' multiplied by 'nr_cpus' number of MSI vectors
         * each time
         */
        hwirq = bitmap_find_free_region(msi->bitmap, msi->nr_msi_vecs,
                                        order_base_2(msi->nr_cpus * nr_irqs));

        mutex_unlock(&msi->bitmap_lock);

        if (hwirq < 0)
                return -ENOSPC;

        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &iproc_msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);
        }

        return 0;
}

static void iproc_msi_irq_domain_free(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        unsigned int hwirq;

        mutex_lock(&msi->bitmap_lock);

        hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
        bitmap_release_region(msi->bitmap, hwirq,
                              order_base_2(msi->nr_cpus * nr_irqs));

        mutex_unlock(&msi->bitmap_lock);

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc = iproc_msi_irq_domain_alloc,
        .free = iproc_msi_irq_domain_free,
};

static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
        u32 __iomem *msg;
        u32 hwirq;
        unsigned int offs;

        offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
        msg = (u32 __iomem *)(msi->eq_cpu + offs);
        hwirq = readl(msg);
        hwirq = (hwirq >> 5) + (hwirq & 0x1f);

        /*
         * Since multiple hwirqs map to a single MSI vector, we need to
         * derive the hwirq at CPU0.  It can then be used to map back to the
         * virq.
         */
        return hwirq_to_canonical_hwirq(msi, hwirq);
}

static void iproc_msi_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct iproc_msi_grp *grp;
        struct iproc_msi *msi;
        u32 eq, head, tail, nr_events;
        unsigned long hwirq;

        chained_irq_enter(chip, desc);

        grp = irq_desc_get_handler_data(desc);
        msi = grp->msi;
        eq = grp->eq;

        /*
         * The iProc MSI event queue is tracked by head and tail pointers.
         * The head pointer indicates the next entry (MSI data) to be
         * consumed by SW in the queue and needs to be updated by SW.  The
         * iProc MSI core uses the tail pointer as the next data insertion
         * point.
         *
         * Entries between head and tail pointers contain valid MSI data.
         * MSI data is guaranteed to be in the event queue memory before the
         * tail pointer is updated by the iProc MSI core.
         */
        head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
                                  eq) & IPROC_MSI_EQ_MASK;
        do {
                tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
                                          eq) & IPROC_MSI_EQ_MASK;

                /*
                 * Figure out total number of events (MSI data) to be
                 * processed.
                 */
                nr_events = (tail < head) ?
                        (EQ_LEN - (head - tail)) : (tail - head);
                if (!nr_events)
                        break;

                /* process all outstanding events */
                while (nr_events--) {
                        hwirq = decode_msi_hwirq(msi, eq, head);
                        generic_handle_domain_irq(msi->inner_domain, hwirq);

                        head++;
                        head %= EQ_LEN;
                }

                /*
                 * Now all outstanding events have been processed.  Update
                 * the head pointer.
                 */
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

                /*
                 * Now go read the tail pointer again to see if there are new
                 * outstanding events that came in during the above window.
                 */
        } while (true);

        chained_irq_exit(chip, desc);
}

static void iproc_msi_enable(struct iproc_msi *msi)
{
        int i, eq;
        u32 val;

        /* Program memory region for each event queue */
        for (i = 0; i < msi->nr_eq_region; i++) {
                dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        /* Program address region for MSI posted writes */
        for (i = 0; i < msi->nr_msi_region; i++) {
                phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                /* Enable MSI event queue */
                val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                      IPROC_MSI_EQ_EN;
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

                /*
                 * Some legacy platforms require the MSI interrupt enable
                 * register to be set explicitly.
                 */
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val |= BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }
        }
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
        u32 eq, val;

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val &= ~BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }

                val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
                val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                         IPROC_MSI_EQ_EN);
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
        }
}

static int iproc_msi_alloc_domains(struct device_node *node,
                                   struct iproc_msi *msi)
{
        struct irq_domain_info info = {
                .fwnode         = of_fwnode_handle(node),
                .ops            = &msi_domain_ops,
                .host_data      = msi,
                .size           = msi->nr_msi_vecs,
        };

        msi->inner_domain = msi_create_parent_irq_domain(&info, &iproc_msi_parent_ops);
        if (!msi->inner_domain)
                return -ENOMEM;

        return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
        if (msi->inner_domain)
                irq_domain_remove(msi->inner_domain);
}

static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
        int i;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 NULL, NULL);
        }
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
        int i, ret;
        cpumask_var_t mask;
        struct iproc_pcie *pcie = msi->pcie;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 iproc_msi_handler,
                                                 &msi->grps[i]);
                /* Dedicate GIC interrupt to each CPU core */
                if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
                        if (ret)
                                dev_err(pcie->dev,
                                        "failed to set affinity for IRQ%d\n",
                                        msi->grps[i].gic_irq);
                        free_cpumask_var(mask);
                } else {
                        dev_err(pcie->dev, "failed to alloc CPU mask\n");
                        ret = -EINVAL;
                }

                if (ret) {
                        /* Free all configured/unconfigured IRQs */
                        iproc_msi_irq_free(msi, cpu);
                        return ret;
                }
        }

        return 0;
}
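
/*
 * The properties consumed below are, per this driver: a "brcm,iproc-msi"
 * compatible string, an "msi-controller" boolean, one GIC interrupt per
 * event queue, and the optional "brcm,pcie-msi-inten" boolean.  A rough,
 * purely illustrative devicetree fragment (interrupt specifiers are made
 * up; see the brcm,iproc-msi binding for the authoritative format):
 *
 *      msi0: msi-controller {
 *              compatible = "brcm,iproc-msi";
 *              msi-controller;
 *              interrupt-parent = <&gic>;
 *              interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
 *                           <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
 *              brcm,pcie-msi-inten;
 *      };
 */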

int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
        struct iproc_msi *msi;
        int i, ret;
        unsigned int cpu;

        if (!of_device_is_compatible(node, "brcm,iproc-msi"))
                return -ENODEV;

        if (!of_property_read_bool(node, "msi-controller"))
                return -ENODEV;

        if (pcie->msi)
                return -EBUSY;

        msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
        if (!msi)
                return -ENOMEM;

        msi->pcie = pcie;
        pcie->msi = msi;
        msi->msi_addr = pcie->base_addr;
        mutex_init(&msi->bitmap_lock);
        msi->nr_cpus = num_possible_cpus();

        if (msi->nr_cpus == 1)
                iproc_msi_parent_ops.supported_flags |= MSI_FLAG_MULTI_PCI_MSI;

        msi->nr_irqs = of_irq_count(node);
        if (!msi->nr_irqs) {
                dev_err(pcie->dev, "found no MSI GIC interrupt\n");
                return -ENODEV;
        }

        if (msi->nr_irqs > NR_HW_IRQS) {
                dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
                         msi->nr_irqs);
                msi->nr_irqs = NR_HW_IRQS;
        }

        if (msi->nr_irqs < msi->nr_cpus) {
                dev_err(pcie->dev,
                        "not enough GIC interrupts for MSI affinity\n");
                return -EINVAL;
        }

        if (msi->nr_irqs % msi->nr_cpus != 0) {
                msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
                dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
                         msi->nr_irqs);
        }

        switch (pcie->type) {
        case IPROC_PCIE_PAXB_BCMA:
        case IPROC_PCIE_PAXB:
                msi->reg_offsets = iproc_msi_reg_paxb;
                msi->nr_eq_region = 1;
                msi->nr_msi_region = 1;
                break;
        case IPROC_PCIE_PAXC:
                msi->reg_offsets = iproc_msi_reg_paxc;
                msi->nr_eq_region = msi->nr_irqs;
                msi->nr_msi_region = msi->nr_irqs;
                break;
        default:
                dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
                return -EINVAL;
        }

        msi->has_inten_reg = of_property_read_bool(node, "brcm,pcie-msi-inten");

        msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
        msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
                                         GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
                                 GFP_KERNEL);
        if (!msi->grps)
                return -ENOMEM;

        for (i = 0; i < msi->nr_irqs; i++) {
                unsigned int irq = irq_of_parse_and_map(node, i);

                if (!irq) {
                        dev_err(pcie->dev, "unable to parse/map interrupt\n");
                        ret = -ENODEV;
                        goto free_irqs;
                }
                msi->grps[i].gic_irq = irq;
                msi->grps[i].msi = msi;
                msi->grps[i].eq = i;
        }

        /* Reserve memory for the event queues and make sure it is zeroed */
        msi->eq_cpu = dma_alloc_coherent(pcie->dev,
                                         msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                                         &msi->eq_dma, GFP_KERNEL);
        if (!msi->eq_cpu) {
                ret = -ENOMEM;
                goto free_irqs;
        }

        ret = iproc_msi_alloc_domains(node, msi);
        if (ret) {
                dev_err(pcie->dev, "failed to create MSI domains\n");
                goto free_eq_dma;
        }

        for_each_online_cpu(cpu) {
                ret = iproc_msi_irq_setup(msi, cpu);
                if (ret)
                        goto free_msi_irq;
        }

        iproc_msi_enable(msi);

        return 0;

free_msi_irq:
        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);
        iproc_msi_free_domains(msi);

free_eq_dma:
        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

free_irqs:
        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
        pcie->msi = NULL;
        return ret;
}
EXPORT_SYMBOL(iproc_msi_init);

void iproc_msi_exit(struct iproc_pcie *pcie)
{
        struct iproc_msi *msi = pcie->msi;
        unsigned int i, cpu;

        if (!msi)
                return;

        iproc_msi_disable(msi);

        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);

        iproc_msi_free_domains(msi);

        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
}
EXPORT_SYMBOL(iproc_msi_exit);
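
/*
 * Illustrative call sequence (a sketch, not verbatim from the iProc host
 * driver): the PCIe host driver is expected to call iproc_msi_init() during
 * probe, passing its struct iproc_pcie and the devicetree node describing
 * the MSI controller, and to call iproc_msi_exit() on teardown to disable
 * the event queues and release the GIC interrupts, DMA memory, and the MSI
 * IRQ domain.
 */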