// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

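	/* A set bit in PCIE_MSI_INTR0_MASK keeps the corresponding vector masked */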
	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

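/* Undo dw_pcie_msi_host_init(): detach any chained handlers, then remove the domains */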
static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX Module supports 64-bit addresses some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Note until there is a better alternative found the reservation is
	 * done by allocating from the artificially limited DMA-coherent
	 * memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");

	msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
					GFP_KERNEL);
	if (!msi_vaddr) {
		dev_err(dev, "Failed to alloc and map MSI data\n");
		dw_pcie_free_msi(pp);
		return -ENOMEM;
	}

	return 0;
}

int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->init) {
		ret = pp->ops->init(pp);
		if (ret)
			return ret;
	}

	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case the default assignment is handled
		 * in the dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_init) {
			ret = pp->ops->msi_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	if (pp->ops->post_init)
		pp->ops->post_init(pp);

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->deinit)
		pp->ops->deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->deinit)
		pp->ops->deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
					pp->cfg0_size);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
						pp->io_base, pp->io_bus_addr,
						pp->io_size);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
						entry->res->start,
						entry->res->start - entry->offset,
						resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
							pp->io_base,
							pp->io_bus_addr,
							pp->io_size);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					    ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (!pci->pp.ops->pme_turn_off)
		return 0;

	pci->pp.ops->pme_turn_off(&pci->pp);

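	/* Poll the LTSSM until the link settles in L2 idle, giving up after the PME Turn Off timeout */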
	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	if (pci->pp.ops->deinit)
		pci->pp.ops->deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);

int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->init) {
		ret = pci->pp.ops->init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);