// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	int i, pos;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			generic_handle_domain_irq(pp->irq_domain,
						  (i * MAX_MSI_IRQS_PER_CTRL) +
						  pos);
			pos++;
		}
	}

	return ret;
}

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct dw_pcie_rp *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

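/*
 * Bottom-half irq_chip callbacks. Each MSI controller services up to
 * MAX_MSI_IRQS_PER_CTRL vectors, with its own MASK/STATUS registers laid
 * out at MSI_REG_CTRL_BLOCK_SIZE strides, so every callback first
 * decomposes the hwirq into a (controller, bit) pair.
 */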
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct dw_pcie_rp *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};

int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

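/*
 * Undo dw_pcie_msi_host_init(): detach the chained handlers from any
 * parent MSI IRQs and remove the MSI and underlying IRQ domains.
 */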
static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
	u32 ctrl;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 NULL, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}

static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!pci_msi_enabled() || !pp->has_msi_ctrl)
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}

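/*
 * Some platforms route each MSI controller to its own parent IRQ,
 * described in the devicetree as "msi0", "msi1", and so on. Probe those
 * here; returning -ENXIO tells the caller to fall back to the single
 * shared "msi" IRQ instead.
 */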
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 ctrl, max_vectors;
	int irq;

	/* Parse any "msiX" IRQs described in the devicetree */
	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
		char msi_name[] = "msiX";

		msi_name[3] = '0' + ctrl;
		irq = platform_get_irq_byname_optional(pdev, msi_name);
		if (irq == -ENXIO)
			break;
		if (irq < 0)
			return dev_err_probe(dev, irq,
					     "Failed to parse MSI IRQ '%s'\n",
					     msi_name);

		pp->msi_irq[ctrl] = irq;
	}

	/* If there are no "msiX" IRQs, the caller should fall back to the "msi" IRQ */
	if (ctrl == 0)
		return -ENXIO;

	max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
	if (pp->num_vectors > max_vectors) {
		dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
			 max_vectors);
		pp->num_vectors = max_vectors;
	}
	if (!pp->num_vectors)
		pp->num_vectors = max_vectors;

	return 0;
}

static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u64 *msi_vaddr = NULL;
	int ret;
	u32 ctrl, num_ctrls;

	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
		pp->irq_mask[ctrl] = ~0;

	if (!pp->msi_irq[0]) {
		ret = dw_pcie_parse_split_msi_irq(pp);
		if (ret < 0 && ret != -ENXIO)
			return ret;
	}

	if (!pp->num_vectors)
		pp->num_vectors = MSI_DEF_NUM_VECTORS;
	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	if (!pp->msi_irq[0]) {
		pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
		if (pp->msi_irq[0] < 0) {
			pp->msi_irq[0] = platform_get_irq(pdev, 0);
			if (pp->msi_irq[0] < 0)
				return pp->msi_irq[0];
		}
	}

	dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

	pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

	ret = dw_pcie_allocate_domains(pp);
	if (ret)
		return ret;

	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		if (pp->msi_irq[ctrl] > 0)
			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
							 dw_chained_msi_isr, pp);
	}

	/*
	 * Even though the iMSI-RX module supports 64-bit addresses, some
	 * peripheral PCIe devices may lack 64-bit message support. In
	 * order not to miss MSI TLPs from those devices, the MSI target
	 * address has to be within the lowest 4GB.
	 *
	 * Until a better alternative is found, the reservation is done by
	 * allocating from the artificially limited DMA-coherent memory.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (!ret)
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);

	if (!msi_vaddr) {
		dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
		msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
						GFP_KERNEL);
		if (!msi_vaddr) {
			dev_err(dev, "Failed to allocate MSI address\n");
			dw_pcie_free_msi(pp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct resource_entry *win;
	struct resource *res;

	win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
	if (win) {
		res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
		if (!res)
			return;

		/*
		 * Allocate MSG TLP region of size 'region_align' at the end of
		 * the host bridge window.
		 */
		res->start = win->res->end - pci->region_align + 1;
		res->end = win->res->end;
		res->name = "msg";
		res->flags = win->res->flags | IORESOURCE_BUSY;

		if (!devm_request_resource(pci->dev, win->res, res))
			pp->msg_res = res;
	}
}

int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	ret = dw_pcie_get_resources(pci);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (res) {
		pp->cfg0_size = resource_size(res);
		pp->cfg0_base = res->start;

		pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
		if (IS_ERR(pp->va_cfg0_base))
			return PTR_ERR(pp->va_cfg0_base);
	} else {
		dev_err(dev, "Missing *config* reg space\n");
		return -ENODEV;
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O range from DT */
	win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (win) {
		pp->io_size = resource_size(win->res);
		pp->io_bus_addr = win->res->start - win->offset;
		pp->io_base = pci_pio_to_address(win->res->start);
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->init) {
		ret = pp->ops->init(pp);
		if (ret)
			return ret;
	}

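	/*
	 * Use the internal iMSI-RX MSI controller only when the glue
	 * driver doesn't provide its own msi_init() and the devicetree
	 * doesn't route MSIs to an external controller via "msi-parent"
	 * or "msi-map".
	 */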
	if (pci_msi_enabled()) {
		pp->has_msi_ctrl = !(pp->ops->msi_init ||
				     of_property_read_bool(np, "msi-parent") ||
				     of_property_read_bool(np, "msi-map"));

		/*
		 * For the has_msi_ctrl case, the default assignment is
		 * handled in dw_pcie_msi_host_init().
		 */
		if (!pp->has_msi_ctrl && !pp->num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else if (pp->num_vectors > MAX_MSI_IRQS) {
			dev_err(dev, "Invalid number of vectors\n");
			ret = -EINVAL;
			goto err_deinit_host;
		}

		if (pp->ops->msi_init) {
			ret = pp->ops->msi_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		} else if (pp->has_msi_ctrl) {
			ret = dw_pcie_msi_host_init(pp);
			if (ret < 0)
				goto err_deinit_host;
		}
	}

	dw_pcie_version_detect(pci);

	dw_pcie_iatu_detect(pci);

	/*
	 * Allocate the resource for MSG TLP before programming the iATU
	 * outbound window in dw_pcie_setup_rc(). Since the allocation depends
	 * on the value of 'region_align', this has to be done after
	 * dw_pcie_iatu_detect().
	 *
	 * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
	 * make use of the generic MSG TLP implementation.
	 */
	if (pp->use_atu_msg)
		dw_pcie_host_request_msg_tlp_res(pp);

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		goto err_free_msi;

	ret = dw_pcie_setup_rc(pp);
	if (ret)
		goto err_remove_edma;

	if (!dw_pcie_link_up(pci)) {
		ret = dw_pcie_start_link(pci);
		if (ret)
			goto err_remove_edma;
	}

	/* Ignore errors, the link may come up later */
	dw_pcie_wait_for_link(pci);

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (ret)
		goto err_stop_link;

	if (pp->ops->post_init)
		pp->ops->post_init(pp);

	return 0;

err_stop_link:
	dw_pcie_stop_link(pci);

err_remove_edma:
	dw_pcie_edma_remove(pci);

err_free_msi:
	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

err_deinit_host:
	if (pp->ops->deinit)
		pp->ops->deinit(pp);

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);

	dw_pcie_stop_link(pci);

	dw_pcie_edma_remove(pci);

	if (pp->has_msi_ctrl)
		dw_pcie_free_msi(pp);

	if (pp->ops->deinit)
		pp->ops->deinit(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

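/*
 * Child bus (everything behind the Root Port) config accesses are
 * funnelled through the "config" window: the outbound iATU is
 * reprogrammed for every access, using CFG0 TLPs for the immediate
 * child bus and CFG1 TLPs for buses further downstream.
 */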
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int type, ret;
	u32 busdev;

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	atu.type = type;
	atu.cpu_addr = pp->cfg0_base;
	atu.pci_addr = busdev;
	atu.size = pp->cfg0_size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return NULL;

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_read(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	ret = pci_generic_config_write(bus, devfn, where, size, val);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	if (pp->cfg0_io_shared) {
		atu.type = PCIE_ATU_TYPE_IO;
		atu.cpu_addr = pp->io_base;
		atu.pci_addr = pp->io_bus_addr;
		atu.size = pp->io_size;

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret)
			return PCIBIOS_SET_FAILED;
	}

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

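/*
 * Program one outbound window per MEM bridge window and one inbound
 * window per dma-range. Window 0 stays reserved for the config-space
 * accesses set up in dw_pcie_other_conf_map_bus(); if no window is left
 * over for IO, the config window is time-shared with IO accesses
 * (pp->cfg0_io_shared).
 */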
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	struct resource_entry *entry;
	int i, ret;

	/* Note the very first outbound ATU is used for CFG IOs */
	if (!pci->num_ob_windows) {
		dev_err(pci->dev, "No outbound iATU found\n");
		return -EINVAL;
	}

	/*
	 * Ensure all out/inbound windows are disabled before proceeding with
	 * the MEM/IO (dma-)ranges setups.
	 */
	for (i = 0; i < pci->num_ob_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

	for (i = 0; i < pci->num_ib_windows; i++)
		dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->windows) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ob_windows <= ++i)
			break;

		atu.index = i;
		atu.type = PCIE_ATU_TYPE_MEM;
		atu.cpu_addr = entry->res->start;
		atu.pci_addr = entry->res->start - entry->offset;

		/* Adjust iATU size if MSG TLP region was allocated before */
		if (pp->msg_res && pp->msg_res->parent == entry->res)
			atu.size = resource_size(entry->res) -
				   resource_size(pp->msg_res);
		else
			atu.size = resource_size(entry->res);

		ret = dw_pcie_prog_outbound_atu(pci, &atu);
		if (ret) {
			dev_err(pci->dev, "Failed to set MEM range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pp->io_size) {
		if (pci->num_ob_windows > ++i) {
			atu.index = i;
			atu.type = PCIE_ATU_TYPE_IO;
			atu.cpu_addr = pp->io_base;
			atu.pci_addr = pp->io_bus_addr;
			atu.size = pp->io_size;

			ret = dw_pcie_prog_outbound_atu(pci, &atu);
			if (ret) {
				dev_err(pci->dev, "Failed to set IO range %pr\n",
					entry->res);
				return ret;
			}
		} else {
			pp->cfg0_io_shared = true;
		}
	}

	if (pci->num_ob_windows <= i)
		dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
			 pci->num_ob_windows);

	pp->msg_atu_index = i;

	i = 0;
	resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
		if (resource_type(entry->res) != IORESOURCE_MEM)
			continue;

		if (pci->num_ib_windows <= i)
			break;

		ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
					       entry->res->start,
					       entry->res->start - entry->offset,
					       resource_size(entry->res));
		if (ret) {
			dev_err(pci->dev, "Failed to set DMA range %pr\n",
				entry->res);
			return ret;
		}
	}

	if (pci->num_ib_windows <= i)
		dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
			 pci->num_ib_windows);

	return 0;
}

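/*
 * Bring up the Root Complex: MSI controller mask/enable registers, RC
 * BARs, bus numbers, the command register, the iATU windows (unless the
 * platform supplies its own child bus accessors) and finally the bridge
 * class code, all behind a temporary DBI read-only unlock.
 */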
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u32 val, ctrl, num_ctrls;
	int ret;

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pp->has_msi_ctrl) {
		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   ~0);
		}
	}

	dw_pcie_msi_init(pp);

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		ret = dw_pcie_iatu_setup(pp);
		if (ret)
			return ret;
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);

static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = { 0 };
	void __iomem *mem;
	int ret;

	if (pci->num_ob_windows <= pci->pp.msg_atu_index)
		return -ENOSPC;

	if (!pci->pp.msg_res)
		return -ENOSPC;

	atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
	atu.routing = PCIE_MSG_TYPE_R_BC;
	atu.type = PCIE_ATU_TYPE_MSG;
	atu.size = resource_size(pci->pp.msg_res);
	atu.index = pci->pp.msg_atu_index;

	atu.cpu_addr = pci->pp.msg_res->start;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		return ret;

	mem = ioremap(atu.cpu_addr, pci->region_align);
	if (!mem)
		return -ENOMEM;

	/* A dummy write is converted to a Msg TLP */
	writel(0, mem);

	iounmap(mem);

	return 0;
}

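/*
 * Suspend flow: broadcast PME_Turn_Off, either via the glue driver's
 * pme_turn_off() hook or the generic Msg TLP helper above, then poll
 * the LTSSM until the link has settled in L2 before deinitializing the
 * host.
 */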
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int ret = 0;

	/*
	 * If L1SS is supported, then do not put the link into L2 as some
	 * devices such as NVMe expect low resume latency.
	 */
	if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
		return 0;

	if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
		return 0;

	if (pci->pp.ops->pme_turn_off)
		pci->pp.ops->pme_turn_off(&pci->pp);
	else
		ret = dw_pcie_pme_turn_off(pci);

	if (ret)
		return ret;

	ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
				PCIE_PME_TO_L2_TIMEOUT_US/10,
				PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
	if (ret) {
		dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
		return ret;
	}

	if (pci->pp.ops->deinit)
		pci->pp.ops->deinit(&pci->pp);

	pci->suspended = true;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);

int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	int ret;

	if (!pci->suspended)
		return 0;

	pci->suspended = false;

	if (pci->pp.ops->init) {
		ret = pci->pp.ops->init(&pci->pp);
		if (ret) {
			dev_err(pci->dev, "Host init failed: %d\n", ret);
			return ret;
		}
	}

	dw_pcie_setup_rc(&pci->pp);

	ret = dw_pcie_start_link(pci);
	if (ret)
		return ret;

	ret = dw_pcie_wait_for_link(pci);
	if (ret)
		return ret;

	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);