// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

/* Main (wrapper) interrupt status/enable registers and their bit layout */
#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN	0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
/* Every wrapper event the main handler cares about */
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

/* MSI/INTx interrupt status/enable registers and their bit layout */
#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

/* Selects RC vs endpoint operation of the controller */
#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define DEVICE_TYPE_LEG_EP 0x1 66 #define DEVICE_TYPE_RC 0x4 67 68 #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 69 #define LTSSM_EN 0x1 70 71 #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C 72 #define LINK_UP BIT(16) 73 #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF 74 75 #define EXP_CAP_ID_OFFSET 0x70 76 77 #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 78 #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 79 80 #define PCIECTRL_TI_CONF_MSI_XMT 0x012c 81 #define MSI_REQ_GRANT BIT(0) 82 #define MSI_VECTOR_SHIFT 7 83 84 #define PCIE_1LANE_2LANE_SELECTION BIT(13) 85 #define PCIE_B1C0_MODE_SEL BIT(2) 86 #define PCIE_B0_B1_TSYNCEN BIT(0) 87 88 struct dra7xx_pcie { 89 struct dw_pcie *pci; 90 void __iomem *base; /* DT ti_conf */ 91 int phy_count; /* DT phy-names count */ 92 struct phy **phy; 93 int link_gen; 94 struct irq_domain *irq_domain; 95 enum dw_pcie_device_mode mode; 96 }; 97 98 struct dra7xx_pcie_of_data { 99 enum dw_pcie_device_mode mode; 100 u32 b1co_mode_sel_mask; 101 }; 102 103 #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) 104 105 static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) 106 { 107 return readl(pcie->base + offset); 108 } 109 110 static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, 111 u32 value) 112 { 113 writel(value, pcie->base + offset); 114 } 115 116 static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) 117 { 118 return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; 119 } 120 121 static int dra7xx_pcie_link_up(struct dw_pcie *pci) 122 { 123 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 124 u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); 125 126 return !!(reg & LINK_UP); 127 } 128 129 static void dra7xx_pcie_stop_link(struct dw_pcie *pci) 130 { 131 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 132 u32 reg; 133 134 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 135 reg &= ~LTSSM_EN; 136 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 137 } 
138 139 static int dra7xx_pcie_establish_link(struct dw_pcie *pci) 140 { 141 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 142 struct device *dev = pci->dev; 143 u32 reg; 144 u32 exp_cap_off = EXP_CAP_ID_OFFSET; 145 146 if (dw_pcie_link_up(pci)) { 147 dev_err(dev, "link is already up\n"); 148 return 0; 149 } 150 151 if (dra7xx->link_gen == 1) { 152 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, 153 4, ®); 154 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { 155 reg &= ~((u32)PCI_EXP_LNKCAP_SLS); 156 reg |= PCI_EXP_LNKCAP_SLS_2_5GB; 157 dw_pcie_write(pci->dbi_base + exp_cap_off + 158 PCI_EXP_LNKCAP, 4, reg); 159 } 160 161 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, 162 2, ®); 163 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { 164 reg &= ~((u32)PCI_EXP_LNKCAP_SLS); 165 reg |= PCI_EXP_LNKCAP_SLS_2_5GB; 166 dw_pcie_write(pci->dbi_base + exp_cap_off + 167 PCI_EXP_LNKCTL2, 2, reg); 168 } 169 } 170 171 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 172 reg |= LTSSM_EN; 173 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 174 175 return 0; 176 } 177 178 static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) 179 { 180 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, 181 LEG_EP_INTERRUPTS | MSI); 182 183 dra7xx_pcie_writel(dra7xx, 184 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, 185 MSI | LEG_EP_INTERRUPTS); 186 } 187 188 static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) 189 { 190 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, 191 INTERRUPTS); 192 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, 193 INTERRUPTS); 194 } 195 196 static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) 197 { 198 dra7xx_pcie_enable_wrapper_interrupts(dra7xx); 199 dra7xx_pcie_enable_msi_interrupts(dra7xx); 200 } 201 202 static int dra7xx_pcie_host_init(struct pcie_port *pp) 203 { 204 struct dw_pcie 
*pci = to_dw_pcie_from_pp(pp); 205 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 206 207 dw_pcie_setup_rc(pp); 208 209 dra7xx_pcie_establish_link(pci); 210 dw_pcie_wait_for_link(pci); 211 dw_pcie_msi_init(pp); 212 dra7xx_pcie_enable_interrupts(dra7xx); 213 214 return 0; 215 } 216 217 static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { 218 .host_init = dra7xx_pcie_host_init, 219 }; 220 221 static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, 222 irq_hw_number_t hwirq) 223 { 224 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); 225 irq_set_chip_data(irq, domain->host_data); 226 227 return 0; 228 } 229 230 static const struct irq_domain_ops intx_domain_ops = { 231 .map = dra7xx_pcie_intx_map, 232 .xlate = pci_irqd_intx_xlate, 233 }; 234 235 static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) 236 { 237 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 238 struct device *dev = pci->dev; 239 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 240 struct device_node *node = dev->of_node; 241 struct device_node *pcie_intc_node = of_get_next_child(node, NULL); 242 243 if (!pcie_intc_node) { 244 dev_err(dev, "No PCIe Intc node found\n"); 245 return -ENODEV; 246 } 247 248 dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, 249 &intx_domain_ops, pp); 250 if (!dra7xx->irq_domain) { 251 dev_err(dev, "Failed to get a INTx IRQ domain\n"); 252 return -ENODEV; 253 } 254 255 return 0; 256 } 257 258 static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) 259 { 260 struct dra7xx_pcie *dra7xx = arg; 261 struct dw_pcie *pci = dra7xx->pci; 262 struct pcie_port *pp = &pci->pp; 263 unsigned long reg; 264 u32 virq, bit; 265 266 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); 267 268 switch (reg) { 269 case MSI: 270 dw_handle_msi_irq(pp); 271 break; 272 case INTA: 273 case INTB: 274 case INTC: 275 case INTD: 276 for_each_set_bit(bit, ®, PCI_NUM_INTX) { 277 virq = 
irq_find_mapping(dra7xx->irq_domain, bit); 278 if (virq) 279 generic_handle_irq(virq); 280 } 281 break; 282 } 283 284 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); 285 286 return IRQ_HANDLED; 287 } 288 289 static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) 290 { 291 struct dra7xx_pcie *dra7xx = arg; 292 struct dw_pcie *pci = dra7xx->pci; 293 struct device *dev = pci->dev; 294 struct dw_pcie_ep *ep = &pci->ep; 295 u32 reg; 296 297 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); 298 299 if (reg & ERR_SYS) 300 dev_dbg(dev, "System Error\n"); 301 302 if (reg & ERR_FATAL) 303 dev_dbg(dev, "Fatal Error\n"); 304 305 if (reg & ERR_NONFATAL) 306 dev_dbg(dev, "Non Fatal Error\n"); 307 308 if (reg & ERR_COR) 309 dev_dbg(dev, "Correctable Error\n"); 310 311 if (reg & ERR_AXI) 312 dev_dbg(dev, "AXI tag lookup fatal Error\n"); 313 314 if (reg & ERR_ECRC) 315 dev_dbg(dev, "ECRC Error\n"); 316 317 if (reg & PME_TURN_OFF) 318 dev_dbg(dev, 319 "Power Management Event Turn-Off message received\n"); 320 321 if (reg & PME_TO_ACK) 322 dev_dbg(dev, 323 "Power Management Turn-Off Ack message received\n"); 324 325 if (reg & PM_PME) 326 dev_dbg(dev, "PM Power Management Event message received\n"); 327 328 if (reg & LINK_REQ_RST) 329 dev_dbg(dev, "Link Request Reset\n"); 330 331 if (reg & LINK_UP_EVT) { 332 if (dra7xx->mode == DW_PCIE_EP_TYPE) 333 dw_pcie_ep_linkup(ep); 334 dev_dbg(dev, "Link-up state change\n"); 335 } 336 337 if (reg & CFG_BME_EVT) 338 dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); 339 340 if (reg & CFG_MSE_EVT) 341 dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); 342 343 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); 344 345 return IRQ_HANDLED; 346 } 347 348 static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) 349 { 350 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 351 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 352 enum pci_barno bar; 353 354 for (bar = BAR_0; bar <= BAR_5; 
bar++) 355 dw_pcie_ep_reset_bar(pci, bar); 356 357 dra7xx_pcie_enable_wrapper_interrupts(dra7xx); 358 } 359 360 static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) 361 { 362 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); 363 mdelay(1); 364 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); 365 } 366 367 static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, 368 u8 interrupt_num) 369 { 370 u32 reg; 371 372 reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; 373 reg |= MSI_REQ_GRANT; 374 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); 375 } 376 377 static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 378 enum pci_epc_irq_type type, u16 interrupt_num) 379 { 380 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 381 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); 382 383 switch (type) { 384 case PCI_EPC_IRQ_LEGACY: 385 dra7xx_pcie_raise_legacy_irq(dra7xx); 386 break; 387 case PCI_EPC_IRQ_MSI: 388 dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); 389 break; 390 default: 391 dev_err(pci->dev, "UNKNOWN IRQ type\n"); 392 } 393 394 return 0; 395 } 396 397 static const struct pci_epc_features dra7xx_pcie_epc_features = { 398 .linkup_notifier = true, 399 .msi_capable = true, 400 .msix_capable = false, 401 }; 402 403 static const struct pci_epc_features* 404 dra7xx_pcie_get_features(struct dw_pcie_ep *ep) 405 { 406 return &dra7xx_pcie_epc_features; 407 } 408 409 static struct dw_pcie_ep_ops pcie_ep_ops = { 410 .ep_init = dra7xx_pcie_ep_init, 411 .raise_irq = dra7xx_pcie_raise_irq, 412 .get_features = dra7xx_pcie_get_features, 413 }; 414 415 static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, 416 struct platform_device *pdev) 417 { 418 int ret; 419 struct dw_pcie_ep *ep; 420 struct resource *res; 421 struct device *dev = &pdev->dev; 422 struct dw_pcie *pci = dra7xx->pci; 423 424 ep = &pci->ep; 425 ep->ops = &pcie_ep_ops; 426 427 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
"ep_dbics"); 428 pci->dbi_base = devm_ioremap_resource(dev, res); 429 if (IS_ERR(pci->dbi_base)) 430 return PTR_ERR(pci->dbi_base); 431 432 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); 433 pci->dbi_base2 = devm_ioremap_resource(dev, res); 434 if (IS_ERR(pci->dbi_base2)) 435 return PTR_ERR(pci->dbi_base2); 436 437 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); 438 if (!res) 439 return -EINVAL; 440 441 ep->phys_base = res->start; 442 ep->addr_size = resource_size(res); 443 444 ret = dw_pcie_ep_init(ep); 445 if (ret) { 446 dev_err(dev, "failed to initialize endpoint\n"); 447 return ret; 448 } 449 450 return 0; 451 } 452 453 static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, 454 struct platform_device *pdev) 455 { 456 int ret; 457 struct dw_pcie *pci = dra7xx->pci; 458 struct pcie_port *pp = &pci->pp; 459 struct device *dev = pci->dev; 460 struct resource *res; 461 462 pp->irq = platform_get_irq(pdev, 1); 463 if (pp->irq < 0) { 464 dev_err(dev, "missing IRQ resource\n"); 465 return pp->irq; 466 } 467 468 ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, 469 IRQF_SHARED | IRQF_NO_THREAD, 470 "dra7-pcie-msi", dra7xx); 471 if (ret) { 472 dev_err(dev, "failed to request irq\n"); 473 return ret; 474 } 475 476 ret = dra7xx_pcie_init_irq_domain(pp); 477 if (ret < 0) 478 return ret; 479 480 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); 481 pci->dbi_base = devm_ioremap_resource(dev, res); 482 if (IS_ERR(pci->dbi_base)) 483 return PTR_ERR(pci->dbi_base); 484 485 pp->ops = &dra7xx_pcie_host_ops; 486 487 ret = dw_pcie_host_init(pp); 488 if (ret) { 489 dev_err(dev, "failed to initialize host\n"); 490 return ret; 491 } 492 493 return 0; 494 } 495 496 static const struct dw_pcie_ops dw_pcie_ops = { 497 .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, 498 .start_link = dra7xx_pcie_establish_link, 499 .stop_link = dra7xx_pcie_stop_link, 500 .link_up = dra7xx_pcie_link_up, 501 }; 502 
503 static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) 504 { 505 int phy_count = dra7xx->phy_count; 506 507 while (phy_count--) { 508 phy_power_off(dra7xx->phy[phy_count]); 509 phy_exit(dra7xx->phy[phy_count]); 510 } 511 } 512 513 static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) 514 { 515 int phy_count = dra7xx->phy_count; 516 int ret; 517 int i; 518 519 for (i = 0; i < phy_count; i++) { 520 ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE); 521 if (ret < 0) 522 goto err_phy; 523 524 ret = phy_init(dra7xx->phy[i]); 525 if (ret < 0) 526 goto err_phy; 527 528 ret = phy_power_on(dra7xx->phy[i]); 529 if (ret < 0) { 530 phy_exit(dra7xx->phy[i]); 531 goto err_phy; 532 } 533 } 534 535 return 0; 536 537 err_phy: 538 while (--i >= 0) { 539 phy_power_off(dra7xx->phy[i]); 540 phy_exit(dra7xx->phy[i]); 541 } 542 543 return ret; 544 } 545 546 static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { 547 .mode = DW_PCIE_RC_TYPE, 548 }; 549 550 static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { 551 .mode = DW_PCIE_EP_TYPE, 552 }; 553 554 static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = { 555 .b1co_mode_sel_mask = BIT(2), 556 .mode = DW_PCIE_RC_TYPE, 557 }; 558 559 static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = { 560 .b1co_mode_sel_mask = GENMASK(3, 2), 561 .mode = DW_PCIE_RC_TYPE, 562 }; 563 564 static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = { 565 .b1co_mode_sel_mask = BIT(2), 566 .mode = DW_PCIE_EP_TYPE, 567 }; 568 569 static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = { 570 .b1co_mode_sel_mask = GENMASK(3, 2), 571 .mode = DW_PCIE_EP_TYPE, 572 }; 573 574 static const struct of_device_id of_dra7xx_pcie_match[] = { 575 { 576 .compatible = "ti,dra7-pcie", 577 .data = &dra7xx_pcie_rc_of_data, 578 }, 579 { 580 .compatible = "ti,dra7-pcie-ep", 581 .data = &dra7xx_pcie_ep_of_data, 582 }, 583 { 584 .compatible = "ti,dra746-pcie-rc", 585 .data = &dra746_pcie_rc_of_data, 
586 }, 587 { 588 .compatible = "ti,dra726-pcie-rc", 589 .data = &dra726_pcie_rc_of_data, 590 }, 591 { 592 .compatible = "ti,dra746-pcie-ep", 593 .data = &dra746_pcie_ep_of_data, 594 }, 595 { 596 .compatible = "ti,dra726-pcie-ep", 597 .data = &dra726_pcie_ep_of_data, 598 }, 599 {}, 600 }; 601 602 /* 603 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 604 * @dra7xx: the dra7xx device where the workaround should be applied 605 * 606 * Access to the PCIe slave port that are not 32-bit aligned will result 607 * in incorrect mapping to TLP Address and Byte enable fields. Therefore, 608 * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or 609 * 0x3. 610 * 611 * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. 612 */ 613 static int dra7xx_pcie_unaligned_memaccess(struct device *dev) 614 { 615 int ret; 616 struct device_node *np = dev->of_node; 617 struct of_phandle_args args; 618 struct regmap *regmap; 619 620 regmap = syscon_regmap_lookup_by_phandle(np, 621 "ti,syscon-unaligned-access"); 622 if (IS_ERR(regmap)) { 623 dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); 624 return -EINVAL; 625 } 626 627 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", 628 2, 0, &args); 629 if (ret) { 630 dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); 631 return ret; 632 } 633 634 ret = regmap_update_bits(regmap, args.args[0], args.args[1], 635 args.args[1]); 636 if (ret) 637 dev_err(dev, "failed to enable unaligned access\n"); 638 639 of_node_put(args.np); 640 641 return ret; 642 } 643 644 static int dra7xx_pcie_configure_two_lane(struct device *dev, 645 u32 b1co_mode_sel_mask) 646 { 647 struct device_node *np = dev->of_node; 648 struct regmap *pcie_syscon; 649 unsigned int pcie_reg; 650 u32 mask; 651 u32 val; 652 653 pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); 654 if (IS_ERR(pcie_syscon)) { 655 dev_err(dev, "unable to get ti,syscon-lane-sel\n"); 656 
return -EINVAL; 657 } 658 659 if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, 660 &pcie_reg)) { 661 dev_err(dev, "couldn't get lane selection reg offset\n"); 662 return -EINVAL; 663 } 664 665 mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; 666 val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; 667 regmap_update_bits(pcie_syscon, pcie_reg, mask, val); 668 669 return 0; 670 } 671 672 static int __init dra7xx_pcie_probe(struct platform_device *pdev) 673 { 674 u32 reg; 675 int ret; 676 int irq; 677 int i; 678 int phy_count; 679 struct phy **phy; 680 struct device_link **link; 681 void __iomem *base; 682 struct resource *res; 683 struct dw_pcie *pci; 684 struct dra7xx_pcie *dra7xx; 685 struct device *dev = &pdev->dev; 686 struct device_node *np = dev->of_node; 687 char name[10]; 688 struct gpio_desc *reset; 689 const struct of_device_id *match; 690 const struct dra7xx_pcie_of_data *data; 691 enum dw_pcie_device_mode mode; 692 u32 b1co_mode_sel_mask; 693 694 match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); 695 if (!match) 696 return -EINVAL; 697 698 data = (struct dra7xx_pcie_of_data *)match->data; 699 mode = (enum dw_pcie_device_mode)data->mode; 700 b1co_mode_sel_mask = data->b1co_mode_sel_mask; 701 702 dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); 703 if (!dra7xx) 704 return -ENOMEM; 705 706 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 707 if (!pci) 708 return -ENOMEM; 709 710 pci->dev = dev; 711 pci->ops = &dw_pcie_ops; 712 713 irq = platform_get_irq(pdev, 0); 714 if (irq < 0) { 715 dev_err(dev, "missing IRQ resource: %d\n", irq); 716 return irq; 717 } 718 719 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); 720 base = devm_ioremap_nocache(dev, res->start, resource_size(res)); 721 if (!base) 722 return -ENOMEM; 723 724 phy_count = of_property_count_strings(np, "phy-names"); 725 if (phy_count < 0) { 726 dev_err(dev, "unable to find the strings\n"); 727 return phy_count; 728 } 729 730 phy = devm_kcalloc(dev, 
phy_count, sizeof(*phy), GFP_KERNEL); 731 if (!phy) 732 return -ENOMEM; 733 734 link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); 735 if (!link) 736 return -ENOMEM; 737 738 for (i = 0; i < phy_count; i++) { 739 snprintf(name, sizeof(name), "pcie-phy%d", i); 740 phy[i] = devm_phy_get(dev, name); 741 if (IS_ERR(phy[i])) 742 return PTR_ERR(phy[i]); 743 744 link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); 745 if (!link[i]) { 746 ret = -EINVAL; 747 goto err_link; 748 } 749 } 750 751 dra7xx->base = base; 752 dra7xx->phy = phy; 753 dra7xx->pci = pci; 754 dra7xx->phy_count = phy_count; 755 756 if (phy_count == 2) { 757 ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask); 758 if (ret < 0) 759 dra7xx->phy_count = 1; /* Fallback to x1 lane mode */ 760 } 761 762 ret = dra7xx_pcie_enable_phy(dra7xx); 763 if (ret) { 764 dev_err(dev, "failed to enable phy\n"); 765 return ret; 766 } 767 768 platform_set_drvdata(pdev, dra7xx); 769 770 pm_runtime_enable(dev); 771 ret = pm_runtime_get_sync(dev); 772 if (ret < 0) { 773 dev_err(dev, "pm_runtime_get_sync failed\n"); 774 goto err_get_sync; 775 } 776 777 reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); 778 if (IS_ERR(reset)) { 779 ret = PTR_ERR(reset); 780 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); 781 goto err_gpio; 782 } 783 784 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); 785 reg &= ~LTSSM_EN; 786 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 787 788 dra7xx->link_gen = of_pci_get_max_link_speed(np); 789 if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) 790 dra7xx->link_gen = 2; 791 792 switch (mode) { 793 case DW_PCIE_RC_TYPE: 794 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { 795 ret = -ENODEV; 796 goto err_gpio; 797 } 798 799 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 800 DEVICE_TYPE_RC); 801 802 ret = dra7xx_pcie_unaligned_memaccess(dev); 803 if (ret) 804 dev_err(dev, "WA for Errata i870 not applied\n"); 805 806 ret 
= dra7xx_add_pcie_port(dra7xx, pdev); 807 if (ret < 0) 808 goto err_gpio; 809 break; 810 case DW_PCIE_EP_TYPE: 811 if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { 812 ret = -ENODEV; 813 goto err_gpio; 814 } 815 816 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, 817 DEVICE_TYPE_EP); 818 819 ret = dra7xx_pcie_unaligned_memaccess(dev); 820 if (ret) 821 goto err_gpio; 822 823 ret = dra7xx_add_pcie_ep(dra7xx, pdev); 824 if (ret < 0) 825 goto err_gpio; 826 break; 827 default: 828 dev_err(dev, "INVALID device type %d\n", mode); 829 } 830 dra7xx->mode = mode; 831 832 ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, 833 IRQF_SHARED, "dra7xx-pcie-main", dra7xx); 834 if (ret) { 835 dev_err(dev, "failed to request irq\n"); 836 goto err_gpio; 837 } 838 839 return 0; 840 841 err_gpio: 842 pm_runtime_put(dev); 843 844 err_get_sync: 845 pm_runtime_disable(dev); 846 dra7xx_pcie_disable_phy(dra7xx); 847 848 err_link: 849 while (--i >= 0) 850 device_link_del(link[i]); 851 852 return ret; 853 } 854 855 #ifdef CONFIG_PM_SLEEP 856 static int dra7xx_pcie_suspend(struct device *dev) 857 { 858 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 859 struct dw_pcie *pci = dra7xx->pci; 860 u32 val; 861 862 if (dra7xx->mode != DW_PCIE_RC_TYPE) 863 return 0; 864 865 /* clear MSE */ 866 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 867 val &= ~PCI_COMMAND_MEMORY; 868 dw_pcie_writel_dbi(pci, PCI_COMMAND, val); 869 870 return 0; 871 } 872 873 static int dra7xx_pcie_resume(struct device *dev) 874 { 875 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 876 struct dw_pcie *pci = dra7xx->pci; 877 u32 val; 878 879 if (dra7xx->mode != DW_PCIE_RC_TYPE) 880 return 0; 881 882 /* set MSE */ 883 val = dw_pcie_readl_dbi(pci, PCI_COMMAND); 884 val |= PCI_COMMAND_MEMORY; 885 dw_pcie_writel_dbi(pci, PCI_COMMAND, val); 886 887 return 0; 888 } 889 890 static int dra7xx_pcie_suspend_noirq(struct device *dev) 891 { 892 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 893 894 
dra7xx_pcie_disable_phy(dra7xx); 895 896 return 0; 897 } 898 899 static int dra7xx_pcie_resume_noirq(struct device *dev) 900 { 901 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 902 int ret; 903 904 ret = dra7xx_pcie_enable_phy(dra7xx); 905 if (ret) { 906 dev_err(dev, "failed to enable phy\n"); 907 return ret; 908 } 909 910 return 0; 911 } 912 #endif 913 914 static void dra7xx_pcie_shutdown(struct platform_device *pdev) 915 { 916 struct device *dev = &pdev->dev; 917 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); 918 int ret; 919 920 dra7xx_pcie_stop_link(dra7xx->pci); 921 922 ret = pm_runtime_put_sync(dev); 923 if (ret < 0) 924 dev_dbg(dev, "pm_runtime_put_sync failed\n"); 925 926 pm_runtime_disable(dev); 927 dra7xx_pcie_disable_phy(dra7xx); 928 } 929 930 static const struct dev_pm_ops dra7xx_pcie_pm_ops = { 931 SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) 932 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, 933 dra7xx_pcie_resume_noirq) 934 }; 935 936 static struct platform_driver dra7xx_pcie_driver = { 937 .driver = { 938 .name = "dra7-pcie", 939 .of_match_table = of_dra7xx_pcie_match, 940 .suppress_bind_attrs = true, 941 .pm = &dra7xx_pcie_pm_ops, 942 }, 943 .shutdown = dra7xx_pcie_shutdown, 944 }; 945 builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); 946