1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs 4 * 5 * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com 6 * 7 * Authors: Kishon Vijay Abraham I <kishon@ti.com> 8 */ 9 10 #include <linux/delay.h> 11 #include <linux/device.h> 12 #include <linux/err.h> 13 #include <linux/interrupt.h> 14 #include <linux/irq.h> 15 #include <linux/irqdomain.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/of_device.h> 19 #include <linux/of_gpio.h> 20 #include <linux/of_pci.h> 21 #include <linux/pci.h> 22 #include <linux/phy/phy.h> 23 #include <linux/platform_device.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/resource.h> 26 #include <linux/types.h> 27 #include <linux/mfd/syscon.h> 28 #include <linux/regmap.h> 29 #include <linux/gpio/consumer.h> 30 31 #include "../../pci.h" 32 #include "pcie-designware.h" 33 34 /* PCIe controller wrapper DRA7XX configuration registers */ 35 36 #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 37 #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 38 #define ERR_SYS BIT(0) 39 #define ERR_FATAL BIT(1) 40 #define ERR_NONFATAL BIT(2) 41 #define ERR_COR BIT(3) 42 #define ERR_AXI BIT(4) 43 #define ERR_ECRC BIT(5) 44 #define PME_TURN_OFF BIT(8) 45 #define PME_TO_ACK BIT(9) 46 #define PM_PME BIT(10) 47 #define LINK_REQ_RST BIT(11) 48 #define LINK_UP_EVT BIT(12) 49 #define CFG_BME_EVT BIT(13) 50 #define CFG_MSE_EVT BIT(14) 51 #define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ 52 ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ 53 LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) 54 55 #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 56 #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 57 #define INTA BIT(0) 58 #define INTB BIT(1) 59 #define INTC BIT(2) 60 #define INTD BIT(3) 61 #define MSI BIT(4) 62 #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) 63 64 #define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 65 #define DEVICE_TYPE_EP 0x0 66 #define DEVICE_TYPE_LEG_EP 0x1 67 #define DEVICE_TYPE_RC 0x4 68 69 #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 70 #define LTSSM_EN 0x1 71 72 #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C 73 #define LINK_UP BIT(16) 74 #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF 75 76 #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 77 #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 78 79 #define PCIECTRL_TI_CONF_MSI_XMT 0x012c 80 #define MSI_REQ_GRANT BIT(0) 81 #define MSI_VECTOR_SHIFT 7 82 83 #define PCIE_1LANE_2LANE_SELECTION BIT(13) 84 #define PCIE_B1C0_MODE_SEL BIT(2) 85 #define PCIE_B0_B1_TSYNCEN BIT(0) 86 87 struct dra7xx_pcie { 88 struct dw_pcie *pci; 89 void __iomem *base; /* DT ti_conf */ 90 int phy_count; /* DT phy-names count */ 91 struct phy **phy; 92 struct irq_domain *irq_domain; 93 enum dw_pcie_device_mode mode; 94 }; 95 96 struct dra7xx_pcie_of_data { 97 enum dw_pcie_device_mode mode; 98 u32 b1co_mode_sel_mask; 99 }; 100 101 #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) 102 103 static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) 104 { 105 return readl(pcie->base + offset); 106 } 107 108 static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, 109 u32 value) 110 { 111 writel(value, pcie->base + offset); 112 } 113 114 static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) 115 { 116 return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; 117 } 118 119 static int dra7xx_pcie_link_up(struct dw_pcie *pci) 120 { 121 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 122 u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); 123 124 return !!(reg & LINK_UP); 125 } 126 127 static void dra7xx_pcie_stop_link(struct dw_pcie *pci) 128 { 129 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 130 u32 reg; 131 132 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 133 reg &= ~LTSSM_EN; 134 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 135 } 136 137 static int dra7xx_pcie_establish_link(struct dw_pcie *pci) 138 { 139 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 140 struct device *dev = pci->dev; 141 u32 reg; 142 143 if (dw_pcie_link_up(pci)) { 144 dev_err(dev, "link is already up\n"); 145 return 0; 146 } 147 148 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 149 reg |= LTSSM_EN; 150 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 151 152 return 0; 153 } 154 155 static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) 156 { 157 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, 158 LEG_EP_INTERRUPTS | MSI); 159 160 dra7xx_pcie_writel(dra7xx, 161 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, 162 MSI | LEG_EP_INTERRUPTS); 163 } 164 165 static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) 166 { 167 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, 168 INTERRUPTS); 169 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, 170 INTERRUPTS); 171 } 172 173 static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) 174 { 175 dra7xx_pcie_enable_wrapper_interrupts(dra7xx); 176 dra7xx_pcie_enable_msi_interrupts(dra7xx); 177 } 178 179 static int dra7xx_pcie_host_init(struct pcie_port *pp) 180 { 181 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 182 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 183 184 dw_pcie_setup_rc(pp); 185 186 dra7xx_pcie_establish_link(pci); 187 dw_pcie_wait_for_link(pci); 188 dw_pcie_msi_init(pp); 189 dra7xx_pcie_enable_interrupts(dra7xx); 190 191 return 0; 192 } 193 194 static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, 195 irq_hw_number_t hwirq) 196 { 197 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); 198 irq_set_chip_data(irq, domain->host_data); 199 200 return 0; 201 } 202 203 static const struct irq_domain_ops intx_domain_ops = { 204 .map = dra7xx_pcie_intx_map, 205 .xlate = pci_irqd_intx_xlate, 206 }; 207 208 static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index) 209 { 210 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 211 unsigned long val; 212 int pos, irq; 213 214 val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + 215 (index * MSI_REG_CTRL_BLOCK_SIZE)); 216 if (!val) 217 return 0; 218 219 pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0); 220 while (pos != MAX_MSI_IRQS_PER_CTRL) { 221 irq = irq_find_mapping(pp->irq_domain, 222 (index * MAX_MSI_IRQS_PER_CTRL) + pos); 223 generic_handle_irq(irq); 224 pos++; 225 pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos); 226 } 227 228 return 1; 229 } 230 231 static void dra7xx_pcie_handle_msi_irq(struct pcie_port *pp) 232 { 233 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 234 int ret, i, count, num_ctrls; 235 236 num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; 237 238 /** 239 * Need to make sure all MSI status bits read 0 before exiting. 240 * Else, new MSI IRQs are not registered by the wrapper. Have an 241 * upperbound for the loop and exit the IRQ in case of IRQ flood 242 * to avoid locking up system in interrupt context. 243 */ 244 count = 0; 245 do { 246 ret = 0; 247 248 for (i = 0; i < num_ctrls; i++) 249 ret |= dra7xx_pcie_handle_msi(pp, i); 250 count++; 251 } while (ret && count <= 1000); 252 253 if (count > 1000) 254 dev_warn_ratelimited(pci->dev, 255 "Too many MSI IRQs to handle\n"); 256 } 257 258 static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc) 259 { 260 struct irq_chip *chip = irq_desc_get_chip(desc); 261 struct dra7xx_pcie *dra7xx; 262 struct dw_pcie *pci; 263 struct pcie_port *pp; 264 unsigned long reg; 265 u32 virq, bit; 266 267 chained_irq_enter(chip, desc); 268 269 pp = irq_desc_get_handler_data(desc); 270 pci = to_dw_pcie_from_pp(pp); 271 dra7xx = to_dra7xx_pcie(pci); 272 273 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); 274 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); 275 276 switch (reg) { 277 case MSI: 278 dra7xx_pcie_handle_msi_irq(pp); 279 break; 280 case INTA: 281 case INTB: 282 case INTC: 283 case INTD: 284 for_each_set_bit(bit, ®, PCI_NUM_INTX) { 285 virq = irq_find_mapping(dra7xx->irq_domain, bit); 286 if (virq) 287 generic_handle_irq(virq); 288 } 289 break; 290 } 291 292 chained_irq_exit(chip, desc); 293 } 294 295 static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) 296 { 297 struct dra7xx_pcie *dra7xx = arg; 298 struct dw_pcie *pci = dra7xx->pci; 299 struct device *dev = pci->dev; 300 struct dw_pcie_ep *ep = &pci->ep; 301 u32 reg; 302 303 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); 304 305 if (reg & ERR_SYS) 306 dev_dbg(dev, "System Error\n"); 307 308 if (reg & ERR_FATAL) 309 dev_dbg(dev, "Fatal Error\n"); 310 311 if (reg & ERR_NONFATAL) 312 dev_dbg(dev, "Non Fatal Error\n"); 313 314 if (reg & ERR_COR) 315 dev_dbg(dev, "Correctable Error\n"); 316 317 if (reg & ERR_AXI) 318 dev_dbg(dev, "AXI tag lookup fatal Error\n"); 319 320 if (reg & ERR_ECRC) 321 dev_dbg(dev, "ECRC Error\n"); 322 323 if (reg & PME_TURN_OFF) 324 dev_dbg(dev, 325 "Power Management Event Turn-Off message received\n"); 326 327 if (reg & PME_TO_ACK) 328 dev_dbg(dev, 329 "Power Management Turn-Off Ack message received\n"); 330 331 if (reg & PM_PME) 332 dev_dbg(dev, "PM Power Management Event message received\n"); 333 334 if (reg & LINK_REQ_RST) 335 dev_dbg(dev, "Link Request Reset\n"); 336 337 if (reg & LINK_UP_EVT) { 338 if (dra7xx->mode == DW_PCIE_EP_TYPE) 339 dw_pcie_ep_linkup(ep); 340 dev_dbg(dev, "Link-up state change\n"); 341 } 342 343 if (reg & CFG_BME_EVT) 344 dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); 345 346 if (reg & CFG_MSE_EVT) 347 dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); 348 349 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); 350 351 return IRQ_HANDLED; 352 } 353 354 static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) 355 { 356 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 357 struct device *dev = pci->dev; 358 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 359 struct device_node *node = dev->of_node; 360 struct device_node *pcie_intc_node = of_get_next_child(node, NULL); 361 362 if (!pcie_intc_node) { 363 dev_err(dev, "No PCIe Intc node found\n"); 364 return -ENODEV; 365 } 366 367 irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler, 368 pp); 369 dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, 370 &intx_domain_ops, pp); 371 of_node_put(pcie_intc_node); 372 if (!dra7xx->irq_domain) { 373 dev_err(dev, "Failed to get a INTx IRQ domain\n"); 374 return -ENODEV; 375 } 376 377 return 0; 378 } 379 380 static void dra7xx_pcie_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) 381 { 382 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 383 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 384 u64 msi_target; 385 386 msi_target = (u64)pp->msi_data; 387 388 msg->address_lo = lower_32_bits(msi_target); 389 msg->address_hi = upper_32_bits(msi_target); 390 391 msg->data = d->hwirq; 392 393 dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", 394 (int)d->hwirq, msg->address_hi, msg->address_lo); 395 } 396 397 static int dra7xx_pcie_msi_set_affinity(struct irq_data *d, 398 const struct cpumask *mask, 399 bool force) 400 { 401 return -EINVAL; 402 } 403 404 static void dra7xx_pcie_bottom_mask(struct irq_data *d) 405 { 406 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 407 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 408 unsigned int res, bit, ctrl; 409 unsigned long flags; 410 411 raw_spin_lock_irqsave(&pp->lock, flags); 412 413 ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; 414 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; 415 bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; 416 417 pp->irq_mask[ctrl] |= BIT(bit); 418 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, 419 pp->irq_mask[ctrl]); 420 421 raw_spin_unlock_irqrestore(&pp->lock, flags); 422 } 423 424 static void dra7xx_pcie_bottom_unmask(struct irq_data *d) 425 { 426 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 427 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 428 unsigned int res, bit, ctrl; 429 unsigned long flags; 430 431 raw_spin_lock_irqsave(&pp->lock, flags); 432 433 ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; 434 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; 435 bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; 436 437 pp->irq_mask[ctrl] &= ~BIT(bit); 438 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, 439 pp->irq_mask[ctrl]); 440 441 raw_spin_unlock_irqrestore(&pp->lock, flags); 442 } 443 444 static void dra7xx_pcie_bottom_ack(struct irq_data *d) 445 { 446 struct pcie_port *pp = irq_data_get_irq_chip_data(d); 447 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 448 unsigned int res, bit, ctrl; 449 450 ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; 451 res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; 452 bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; 453 454 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit)); 455 } 456 457 static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = { 458 .name = "DRA7XX-PCI-MSI", 459 .irq_ack = dra7xx_pcie_bottom_ack, 460 .irq_compose_msi_msg = dra7xx_pcie_setup_msi_msg, 461 .irq_set_affinity = dra7xx_pcie_msi_set_affinity, 462 .irq_mask = dra7xx_pcie_bottom_mask, 463 .irq_unmask = dra7xx_pcie_bottom_unmask, 464 }; 465 466 static int dra7xx_pcie_msi_host_init(struct pcie_port *pp) 467 { 468 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 469 struct device *dev = pci->dev; 470 u32 ctrl, num_ctrls; 471 int ret; 472 473 pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip; 474 475 num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; 476 /* Initialize IRQ Status array */ 477 for (ctrl = 0; ctrl < num_ctrls; ctrl++) { 478 pp->irq_mask[ctrl] = ~0; 479 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + 480 (ctrl * MSI_REG_CTRL_BLOCK_SIZE), 481 pp->irq_mask[ctrl]); 482 dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + 483 (ctrl * MSI_REG_CTRL_BLOCK_SIZE), 484 ~0); 485 } 486 487 ret = dw_pcie_allocate_domains(pp); 488 if (ret) 489 return ret; 490 491 pp->msi_data = dma_map_single_attrs(dev, &pp->msi_msg, 492 sizeof(pp->msi_msg), 493 DMA_FROM_DEVICE, 494 DMA_ATTR_SKIP_CPU_SYNC); 495 ret = dma_mapping_error(dev, pp->msi_data); 496 if (ret) { 497 dev_err(dev, "Failed to map MSI data\n"); 498 pp->msi_data = 0; 499 dw_pcie_free_msi(pp); 500 } 501 return ret; 502 } 503 504 static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { 505 .host_init = dra7xx_pcie_host_init, 506 .msi_host_init = dra7xx_pcie_msi_host_init, 507 }; 508 509 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) 510 { 511 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 512 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 513 enum pci_barno bar; 514 515 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) 516 dw_pcie_ep_reset_bar(pci, bar); 517 518 dra7xx_pcie_enable_wrapper_interrupts(dra7xx); 519 } 520 521 static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) 522 { 523 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); 524 mdelay(1); 525 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); 526 } 527 528 static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, 529 u8 interrupt_num) 530 { 531 u32 reg; 532 533 reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; 534 reg |= MSI_REQ_GRANT; 535 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); 536 } 537 538 static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 539 enum pci_epc_irq_type type, u16 interrupt_num) 540 { 541 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 542 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 543 544 switch (type) { 545 case PCI_EPC_IRQ_LEGACY: 546 dra7xx_pcie_raise_legacy_irq(dra7xx); 547 break; 548 case PCI_EPC_IRQ_MSI: 549 dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); 550 break; 551 default: 552 dev_err(pci->dev, "UNKNOWN IRQ type\n"); 553 } 554 555 return 0; 556 } 557 558 static const struct pci_epc_features dra7xx_pcie_epc_features = { 559 .linkup_notifier = true, 560 .msi_capable = true, 561 .msix_capable = false, 562 }; 563 564 static const struct pci_epc_features* 565 dra7xx_pcie_get_features(struct dw_pcie_ep *ep) 566 { 567 return &dra7xx_pcie_epc_features; 568 } 569 570 static const struct dw_pcie_ep_ops pcie_ep_ops = { 571 .ep_init = dra7xx_pcie_ep_init, 572 .raise_irq = dra7xx_pcie_raise_irq, 573 .get_features = dra7xx_pcie_get_features, 574 }; 575 576 static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, 577 struct platform_device *pdev) 578 { 579 int ret; 580 struct dw_pcie_ep *ep; 581 struct device *dev = &pdev->dev; 582 struct dw_pcie *pci = dra7xx->pci; 583 584 ep = &pci->ep; 585 ep->ops = &pcie_ep_ops; 586 587 pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics"); 588 if (IS_ERR(pci->dbi_base)) 589 return PTR_ERR(pci->dbi_base); 590 591 pci->dbi_base2 = 592 devm_platform_ioremap_resource_byname(pdev, "ep_dbics2"); 593 if (IS_ERR(pci->dbi_base2)) 594 return PTR_ERR(pci->dbi_base2); 595 596 ret = dw_pcie_ep_init(ep); 597 if (ret) { 598 dev_err(dev, "failed to initialize endpoint\n"); 599 return ret; 600 } 601 602 return 0; 603 } 604 605 static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, 606 struct platform_device *pdev) 607 { 608 int ret; 609 struct dw_pcie *pci = dra7xx->pci; 610 struct pcie_port *pp = &pci->pp; 611 struct device *dev = pci->dev; 612 613 pp->irq = platform_get_irq(pdev, 1); 614 if (pp->irq < 0) 615 return pp->irq; 616 617 ret = dra7xx_pcie_init_irq_domain(pp); 618 if (ret < 0) 619 return ret; 620 621 pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics"); 622 if (IS_ERR(pci->dbi_base)) 623 return PTR_ERR(pci->dbi_base); 624 625 pp->ops = &dra7xx_pcie_host_ops; 626 627 ret = dw_pcie_host_init(pp); 628 if (ret) { 629 dev_err(dev, "failed to initialize host\n"); 630 return ret; 631 } 632 633 return 0; 634 } 635 636 static const struct dw_pcie_ops dw_pcie_ops = { 637 .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, 638 .start_link = dra7xx_pcie_establish_link, 639 .stop_link = dra7xx_pcie_stop_link, 640 .link_up = dra7xx_pcie_link_up, 641 }; 642 643 static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) 644 { 645 int phy_count = dra7xx->phy_count; 646 647 while (phy_count--) { 648 phy_power_off(dra7xx->phy[phy_count]); 649 phy_exit(dra7xx->phy[phy_count]); 650 } 651 } 652 653 static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) 654 { 655 int phy_count = dra7xx->phy_count; 656 int ret; 657 int i; 658 659 for (i = 0; i < phy_count; i++) { 660 ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE); 661 if (ret < 0) 662 goto err_phy; 663 664 ret = phy_init(dra7xx->phy[i]); 665 if (ret < 0) 666 goto err_phy; 667 668 ret = phy_power_on(dra7xx->phy[i]); 669 if (ret < 0) { 670 phy_exit(dra7xx->phy[i]); 671 goto err_phy; 672 } 673 } 674 675 return 0; 676 677 err_phy: 678 while (--i >= 0) { 679 phy_power_off(dra7xx->phy[i]); 680 phy_exit(dra7xx->phy[i]); 681 } 682 683 return ret; 684 } 685 686 static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { 687 .mode = DW_PCIE_RC_TYPE, 688 }; 689 690 static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { 691 .mode = DW_PCIE_EP_TYPE, 692 }; 693 694 static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = { 695 .b1co_mode_sel_mask = BIT(2), 696 .mode = DW_PCIE_RC_TYPE, 697 }; 698 699 static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = { 700 .b1co_mode_sel_mask = GENMASK(3, 2), 701 .mode = DW_PCIE_RC_TYPE, 702 }; 703 704 static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = { 705 .b1co_mode_sel_mask = BIT(2), 706 .mode = DW_PCIE_EP_TYPE, 707 }; 708 709 static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = { 710 .b1co_mode_sel_mask = GENMASK(3, 2), 711 .mode = DW_PCIE_EP_TYPE, 712 }; 713 714 static const struct of_device_id of_dra7xx_pcie_match[] = { 715 { 716 .compatible = "ti,dra7-pcie", 717 .data = &dra7xx_pcie_rc_of_data, 718 }, 719 { 720 .compatible = "ti,dra7-pcie-ep", 721 .data = &dra7xx_pcie_ep_of_data, 722 }, 723 { 724 .compatible = "ti,dra746-pcie-rc", 725 .data = &dra746_pcie_rc_of_data, 726 }, 727 { 728 .compatible = "ti,dra726-pcie-rc", 729 .data = &dra726_pcie_rc_of_data, 730 }, 731 { 732 .compatible = "ti,dra746-pcie-ep", 733 .data = &dra746_pcie_ep_of_data, 734 }, 735 { 736 .compatible = "ti,dra726-pcie-ep", 737 .data = &dra726_pcie_ep_of_data, 738 }, 739 {}, 740 }; 741 742 /* 743 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 744 * @dra7xx: the dra7xx device where the workaround should be applied 745 * 746 * Access to the PCIe slave port that are not 32-bit aligned will result 747 * in incorrect mapping to TLP Address and Byte enable fields. Therefore, 748 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or 749 * 0x3. 750 * 751 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. 752 */ 753 static int dra7xx_pcie_unaligned_memaccess(struct device *dev) 754 { 755 int ret; 756 struct device_node *np = dev->of_node; 757 struct of_phandle_args args; 758 struct regmap *regmap; 759 760 regmap = syscon_regmap_lookup_by_phandle(np, 761 "ti,syscon-unaligned-access"); 762 if (IS_ERR(regmap)) { 763 dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); 764 return -EINVAL; 765 } 766 767 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", 768 2, 0, &args); 769 if (ret) { 770 dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); 771 return ret; 772 } 773 774 ret = regmap_update_bits(regmap, args.args[0], args.args[1], 775 args.args[1]); 776 if (ret) 777 dev_err(dev, "failed to enable unaligned access\n"); 778 779 of_node_put(args.np); 780 781 return ret; 782 } 783 784 static int dra7xx_pcie_configure_two_lane(struct device *dev, 785 u32 b1co_mode_sel_mask) 786 { 787 struct device_node *np = dev->of_node; 788 struct regmap *pcie_syscon; 789 unsigned int pcie_reg; 790 u32 mask; 791 u32 val; 792 793 pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); 794 if (IS_ERR(pcie_syscon)) { 795 dev_err(dev, "unable to get ti,syscon-lane-sel\n"); 796 return -EINVAL; 797 } 798 799 if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, 800 &pcie_reg)) { 801 dev_err(dev, "couldn't get lane selection reg offset\n"); 802 return -EINVAL; 803 } 804 805 mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; 806 val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; 807 regmap_update_bits(pcie_syscon, pcie_reg, mask, val); 808 809 return 0; 810 } 811 812 static int __init dra7xx_pcie_probe(struct platform_device *pdev) 813 { 814 u32 reg; 815 int ret; 816 int irq; 817 int i; 818 int phy_count; 819 struct phy **phy; 820 struct device_link **link; 821 void __iomem *base; 822 struct dw_pcie *pci; 823 struct dra7xx_pcie *dra7xx; 824 struct device *dev = &pdev->dev; 825 struct device_node *np = dev->of_node; 826 char name[10]; 827 struct gpio_desc *reset; 828 const struct of_device_id *match; 829 const struct dra7xx_pcie_of_data *data; 830 enum dw_pcie_device_mode mode; 831 u32 b1co_mode_sel_mask; 832 833 match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); 834 if (!match) 835 return -EINVAL; 836 837 data = (struct dra7xx_pcie_of_data *)match->data; 838 mode = (enum dw_pcie_device_mode)data->mode; 839 b1co_mode_sel_mask = data->b1co_mode_sel_mask; 840 841 dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); 842 if (!dra7xx) 843 return -ENOMEM; 844 845 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 846 if (!pci) 847 return -ENOMEM; 848 849 pci->dev = dev; 850 pci->ops = &dw_pcie_ops; 851 852 irq = platform_get_irq(pdev, 0); 853 if (irq < 0) 854 return irq; 855 856 base = devm_platform_ioremap_resource_byname(pdev, "ti_conf"); 857 if (IS_ERR(base)) 858 return PTR_ERR(base); 859 860 phy_count = of_property_count_strings(np, "phy-names"); 861 if (phy_count < 0) { 862 dev_err(dev, "unable to find the strings\n"); 863 return phy_count; 864 } 865 866 phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); 867 if (!phy) 868 return -ENOMEM; 869 870 link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); 871 if (!link) 872 return -ENOMEM; 873 874 for (i = 0; i < phy_count; i++) { 875 snprintf(name, sizeof(name), "pcie-phy%d", i); 876 phy[i] = devm_phy_get(dev, name); 877 if (IS_ERR(phy[i])) 878 return PTR_ERR(phy[i]); 879 880 link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); 881 if (!link[i]) { 882 ret = -EINVAL; 883 goto err_link; 884 } 885 } 886 887 dra7xx->base = base; 888 dra7xx->phy = phy; 889 dra7xx->pci = pci; 890 dra7xx->phy_count = phy_count; 891 892 if (phy_count == 2) { 893 ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask); 894 if (ret < 0) 895 dra7xx->phy_count = 1; /* Fallback to x1 lane mode */ 896 } 897 898 ret = dra7xx_pcie_enable_phy(dra7xx); 899 if (ret) { 900 dev_err(dev, "failed to enable phy\n"); 901 return ret; 902 } 903 904 platform_set_drvdata(pdev, dra7xx); 905 906 pm_runtime_enable(dev); 907 ret = pm_runtime_get_sync(dev); 908 if (ret < 0) { 909 dev_err(dev, "pm_runtime_get_sync failed\n"); 910 goto err_get_sync; 911 } 912 913 reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); 914 if (IS_ERR(reset)) { 915 ret = PTR_ERR(reset); 916 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); 917 goto err_gpio; 918 } 919 920 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 921 reg &= ~LTSSM_EN; 922 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 923 924 switch (mode) { 925 case DW_PCIE_RC_TYPE: 926 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { 927 ret = -ENODEV; 928 goto err_gpio; 929 } 930 931 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 932 DEVICE_TYPE_RC); 933 934 ret = dra7xx_pcie_unaligned_memaccess(dev); 935 if (ret) 936 dev_err(dev, "WA for Errata i870 not applied\n"); 937 938 ret = dra7xx_add_pcie_port(dra7xx, pdev); 939 if (ret < 0) 940 goto err_gpio; 941 break; 942 case DW_PCIE_EP_TYPE: 943 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { 944 ret = -ENODEV; 945 goto err_gpio; 946 } 947 948 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 949 DEVICE_TYPE_EP); 950 951 ret = dra7xx_pcie_unaligned_memaccess(dev); 952 if (ret) 953 goto err_gpio; 954 955 ret = dra7xx_add_pcie_ep(dra7xx, pdev); 956 if (ret < 0) 957 goto err_gpio; 958 break; 959 default: 960 dev_err(dev, "INVALID device type %d\n", mode); 961 } 962 dra7xx->mode = mode; 963 964 ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, 965 IRQF_SHARED, "dra7xx-pcie-main", dra7xx); 966 if (ret) { 967 dev_err(dev, "failed to request irq\n"); 968 goto err_gpio; 969 } 970 971 return 0; 972 973 err_gpio: 974 err_get_sync: 975 pm_runtime_put(dev); 976 pm_runtime_disable(dev); 977 dra7xx_pcie_disable_phy(dra7xx); 978 979 err_link: 980 while (--i >= 0) 981 device_link_del(link[i]); 982 983 return ret; 984 } 985 986 #ifdef CONFIG_PM_SLEEP 987 static int dra7xx_pcie_suspend(struct device *dev) 988 { 989 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 990 struct dw_pcie *pci = dra7xx->pci; 991 u32 val; 992 993 if (dra7xx->mode != DW_PCIE_RC_TYPE) 994 return 0; 995 996 /* clear MSE */ 997 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 998 val &= ~PCI_COMMAND_MEMORY; 999 dw_pcie_writel_dbi(pci, PCI_COMMAND, val); 1000 1001 return 0; 1002 } 1003 1004 static int dra7xx_pcie_resume(struct device *dev) 1005 { 1006 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1007 struct dw_pcie *pci = dra7xx->pci; 1008 u32 val; 1009 1010 if (dra7xx->mode != DW_PCIE_RC_TYPE) 1011 return 0; 1012 1013 /* set MSE */ 1014 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 1015 val |= PCI_COMMAND_MEMORY; 1016 dw_pcie_writel_dbi(pci, PCI_COMMAND, val); 1017 1018 return 0; 1019 } 1020 1021 static int dra7xx_pcie_suspend_noirq(struct device *dev) 1022 { 1023 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1024 1025 dra7xx_pcie_disable_phy(dra7xx); 1026 1027 return 0; 1028 } 1029 1030 static int dra7xx_pcie_resume_noirq(struct device *dev) 1031 { 1032 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1033 int ret; 1034 1035 ret = dra7xx_pcie_enable_phy(dra7xx); 1036 if (ret) { 1037 dev_err(dev, "failed to enable phy\n"); 1038 return ret; 1039 } 1040 1041 return 0; 1042 } 1043 #endif 1044 1045 static void dra7xx_pcie_shutdown(struct platform_device *pdev) 1046 { 1047 struct device *dev = &pdev->dev; 1048 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 1049 int ret; 1050 1051 dra7xx_pcie_stop_link(dra7xx->pci); 1052 1053 ret = pm_runtime_put_sync(dev); 1054 if (ret < 0) 1055 dev_dbg(dev, "pm_runtime_put_sync failed\n"); 1056 1057 pm_runtime_disable(dev); 1058 dra7xx_pcie_disable_phy(dra7xx); 1059 } 1060 1061 static const struct dev_pm_ops dra7xx_pcie_pm_ops = { 1062 SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) 1063 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, 1064 dra7xx_pcie_resume_noirq) 1065 }; 1066 1067 static struct platform_driver dra7xx_pcie_driver = { 1068 .driver = { 1069 .name = "dra7-pcie", 1070 .of_match_table = of_dra7xx_pcie_match, 1071 .suppress_bind_attrs = true, 1072 .pm = &dra7xx_pcie_pm_ops, 1073 }, 1074 .shutdown = dra7xx_pcie_shutdown, 1075 }; 1076 builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); 1077