1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Qualcomm PCIe root complex driver 4 * 5 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. 6 * Copyright 2015 Linaro Limited. 7 * 8 * Author: Stanimir Varbanov <svarbanov@mm-sol.com> 9 */ 10 11 #include <linux/clk.h> 12 #include <linux/crc8.h> 13 #include <linux/delay.h> 14 #include <linux/gpio/consumer.h> 15 #include <linux/interconnect.h> 16 #include <linux/interrupt.h> 17 #include <linux/io.h> 18 #include <linux/iopoll.h> 19 #include <linux/kernel.h> 20 #include <linux/init.h> 21 #include <linux/of_device.h> 22 #include <linux/of_gpio.h> 23 #include <linux/pci.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/platform_device.h> 26 #include <linux/phy/phy.h> 27 #include <linux/regulator/consumer.h> 28 #include <linux/reset.h> 29 #include <linux/slab.h> 30 #include <linux/types.h> 31 32 #include "../../pci.h" 33 #include "pcie-designware.h" 34 35 #define PCIE20_PARF_SYS_CTRL 0x00 36 #define MST_WAKEUP_EN BIT(13) 37 #define SLV_WAKEUP_EN BIT(12) 38 #define MSTR_ACLK_CGC_DIS BIT(10) 39 #define SLV_ACLK_CGC_DIS BIT(9) 40 #define CORE_CLK_CGC_DIS BIT(6) 41 #define AUX_PWR_DET BIT(4) 42 #define L23_CLK_RMV_DIS BIT(2) 43 #define L1_CLK_RMV_DIS BIT(1) 44 45 #define PCIE20_PARF_PM_CTRL 0x20 46 #define REQ_NOT_ENTR_L1 BIT(5) 47 48 #define PCIE20_PARF_PHY_CTRL 0x40 49 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) 50 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) 51 52 #define PCIE20_PARF_PHY_REFCLK 0x4C 53 #define PHY_REFCLK_SSP_EN BIT(16) 54 #define PHY_REFCLK_USE_PAD BIT(12) 55 56 #define PCIE20_PARF_DBI_BASE_ADDR 0x168 57 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C 58 #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 59 #define AHB_CLK_EN BIT(0) 60 #define MSTR_AXI_CLK_EN BIT(1) 61 #define BYPASS BIT(4) 62 63 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 64 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 65 #define PCIE20_PARF_LTSSM 0x1B0 66 #define PCIE20_PARF_SID_OFFSET 0x234 67 #define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C 68 #define PCIE20_PARF_DEVICE_TYPE 0x1000 69 #define PCIE20_PARF_BDF_TO_SID_TABLE_N 0x2000 70 71 #define PCIE20_ELBI_SYS_CTRL 0x04 72 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) 73 74 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 75 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 76 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 77 #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c 78 #define CFG_BRIDGE_SB_INIT BIT(0) 79 80 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, \ 81 250) 82 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, \ 83 1) 84 #define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \ 85 PCI_EXP_SLTCAP_PCP | \ 86 PCI_EXP_SLTCAP_MRLSP | \ 87 PCI_EXP_SLTCAP_AIP | \ 88 PCI_EXP_SLTCAP_PIP | \ 89 PCI_EXP_SLTCAP_HPS | \ 90 PCI_EXP_SLTCAP_HPC | \ 91 PCI_EXP_SLTCAP_EIP | \ 92 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \ 93 PCIE_CAP_SLOT_POWER_LIMIT_SCALE) 94 95 #define PCIE20_PARF_Q2A_FLUSH 0x1AC 96 97 #define PCIE20_MISC_CONTROL_1_REG 0x8BC 98 #define DBI_RO_WR_EN 1 99 100 #define PERST_DELAY_US 1000 101 /* PARF registers */ 102 #define PCIE20_PARF_PCS_DEEMPH 0x34 103 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) 104 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) 105 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) 106 107 #define PCIE20_PARF_PCS_SWING 0x38 108 #define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) 109 #define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) 110 111 #define PCIE20_PARF_CONFIG_BITS 0x50 112 #define PHY_RX0_EQ(x) ((x) << 24) 113 114 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 115 #define SLV_ADDR_SPACE_SZ 0x10000000 116 117 #define PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0 118 119 #define DEVICE_TYPE_RC 0x4 120 121 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 122 #define QCOM_PCIE_2_1_0_MAX_CLOCKS 5 123 124 #define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0)) 125 126 struct qcom_pcie_resources_2_1_0 { 127 struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; 128 struct reset_control *pci_reset; 129 struct reset_control *axi_reset; 130 struct reset_control *ahb_reset; 131 struct reset_control *por_reset; 132 struct reset_control *phy_reset; 133 struct reset_control *ext_reset; 134 struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; 135 }; 136 137 struct qcom_pcie_resources_1_0_0 { 138 struct clk *iface; 139 struct clk *aux; 140 struct clk *master_bus; 141 struct clk *slave_bus; 142 struct reset_control *core; 143 struct regulator *vdda; 144 }; 145 146 #define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 147 struct qcom_pcie_resources_2_3_2 { 148 struct clk *aux_clk; 149 struct clk *master_clk; 150 struct clk *slave_clk; 151 struct clk *cfg_clk; 152 struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; 153 }; 154 155 #define QCOM_PCIE_2_4_0_MAX_CLOCKS 4 156 struct qcom_pcie_resources_2_4_0 { 157 struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS]; 158 int num_clks; 159 struct reset_control *axi_m_reset; 160 struct reset_control *axi_s_reset; 161 struct reset_control *pipe_reset; 162 struct reset_control *axi_m_vmid_reset; 163 struct reset_control *axi_s_xpu_reset; 164 struct reset_control *parf_reset; 165 struct reset_control *phy_reset; 166 struct reset_control *axi_m_sticky_reset; 167 struct reset_control *pipe_sticky_reset; 168 struct reset_control *pwr_reset; 169 struct reset_control *ahb_reset; 170 struct reset_control *phy_ahb_reset; 171 }; 172 173 struct qcom_pcie_resources_2_3_3 { 174 struct clk *iface; 175 struct clk *axi_m_clk; 176 struct clk *axi_s_clk; 177 struct clk *ahb_clk; 178 struct clk *aux_clk; 179 struct reset_control *rst[7]; 180 }; 181 182 /* 6 clocks typically, 7 for sm8250 */ 183 struct qcom_pcie_resources_2_7_0 { 184 struct clk_bulk_data clks[12]; 185 int num_clks; 186 struct regulator_bulk_data supplies[2]; 187 struct reset_control *pci_reset; 188 }; 189 190 struct qcom_pcie_resources_2_9_0 { 191 struct clk_bulk_data clks[5]; 192 struct reset_control *rst; 193 }; 194 195 union qcom_pcie_resources { 196 struct qcom_pcie_resources_1_0_0 v1_0_0; 197 struct qcom_pcie_resources_2_1_0 v2_1_0; 198 struct qcom_pcie_resources_2_3_2 v2_3_2; 199 struct qcom_pcie_resources_2_3_3 v2_3_3; 200 struct qcom_pcie_resources_2_4_0 v2_4_0; 201 struct qcom_pcie_resources_2_7_0 v2_7_0; 202 struct qcom_pcie_resources_2_9_0 v2_9_0; 203 }; 204 205 struct qcom_pcie; 206 207 struct qcom_pcie_ops { 208 int (*get_resources)(struct qcom_pcie *pcie); 209 int (*init)(struct qcom_pcie *pcie); 210 int (*post_init)(struct qcom_pcie *pcie); 211 void (*deinit)(struct qcom_pcie *pcie); 212 void (*ltssm_enable)(struct qcom_pcie *pcie); 213 int (*config_sid)(struct qcom_pcie *pcie); 214 }; 215 216 struct qcom_pcie_cfg { 217 const struct qcom_pcie_ops *ops; 218 }; 219 220 struct qcom_pcie { 221 struct dw_pcie *pci; 222 void __iomem *parf; /* DT parf */ 223 void __iomem *elbi; /* DT elbi */ 224 union qcom_pcie_resources res; 225 struct phy *phy; 226 struct gpio_desc *reset; 227 struct icc_path *icc_mem; 228 const struct qcom_pcie_cfg *cfg; 229 }; 230 231 #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) 232 233 static void qcom_ep_reset_assert(struct qcom_pcie *pcie) 234 { 235 gpiod_set_value_cansleep(pcie->reset, 1); 236 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); 237 } 238 239 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) 240 { 241 /* Ensure that PERST has been asserted for at least 100 ms */ 242 msleep(100); 243 gpiod_set_value_cansleep(pcie->reset, 0); 244 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); 245 } 246 247 static int qcom_pcie_start_link(struct dw_pcie *pci) 248 { 249 struct qcom_pcie *pcie = to_qcom_pcie(pci); 250 251 /* Enable Link Training state machine */ 252 if (pcie->cfg->ops->ltssm_enable) 253 pcie->cfg->ops->ltssm_enable(pcie); 254 255 return 0; 256 } 257 258 static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) 259 { 260 u32 val; 261 262 /* enable link training */ 263 val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); 264 val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; 265 writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); 266 } 267 268 static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) 269 { 270 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 271 struct dw_pcie *pci = pcie->pci; 272 struct device *dev = pci->dev; 273 int ret; 274 275 res->supplies[0].supply = "vdda"; 276 res->supplies[1].supply = "vdda_phy"; 277 res->supplies[2].supply = "vdda_refclk"; 278 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 279 res->supplies); 280 if (ret) 281 return ret; 282 283 res->clks[0].id = "iface"; 284 res->clks[1].id = "core"; 285 res->clks[2].id = "phy"; 286 res->clks[3].id = "aux"; 287 res->clks[4].id = "ref"; 288 289 /* iface, core, phy are required */ 290 ret = devm_clk_bulk_get(dev, 3, res->clks); 291 if (ret < 0) 292 return ret; 293 294 /* aux, ref are optional */ 295 ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); 296 if (ret < 0) 297 return ret; 298 299 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); 300 if (IS_ERR(res->pci_reset)) 301 return PTR_ERR(res->pci_reset); 302 303 res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); 304 if (IS_ERR(res->axi_reset)) 305 return PTR_ERR(res->axi_reset); 306 307 res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); 308 if (IS_ERR(res->ahb_reset)) 309 return PTR_ERR(res->ahb_reset); 310 311 res->por_reset = devm_reset_control_get_exclusive(dev, "por"); 312 if (IS_ERR(res->por_reset)) 313 return PTR_ERR(res->por_reset); 314 315 res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); 316 if (IS_ERR(res->ext_reset)) 317 return PTR_ERR(res->ext_reset); 318 319 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); 320 return PTR_ERR_OR_ZERO(res->phy_reset); 321 } 322 323 static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) 324 { 325 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 326 327 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); 328 reset_control_assert(res->pci_reset); 329 reset_control_assert(res->axi_reset); 330 reset_control_assert(res->ahb_reset); 331 reset_control_assert(res->por_reset); 332 reset_control_assert(res->ext_reset); 333 reset_control_assert(res->phy_reset); 334 335 writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); 336 337 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 338 } 339 340 static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) 341 { 342 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 343 struct dw_pcie *pci = pcie->pci; 344 struct device *dev = pci->dev; 345 int ret; 346 347 /* reset the PCIe interface as uboot can leave it undefined state */ 348 reset_control_assert(res->pci_reset); 349 reset_control_assert(res->axi_reset); 350 reset_control_assert(res->ahb_reset); 351 reset_control_assert(res->por_reset); 352 reset_control_assert(res->ext_reset); 353 reset_control_assert(res->phy_reset); 354 355 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 356 if (ret < 0) { 357 dev_err(dev, "cannot enable regulators\n"); 358 return ret; 359 } 360 361 ret = reset_control_deassert(res->ahb_reset); 362 if (ret) { 363 dev_err(dev, "cannot deassert ahb reset\n"); 364 goto err_deassert_ahb; 365 } 366 367 ret = reset_control_deassert(res->ext_reset); 368 if (ret) { 369 dev_err(dev, "cannot deassert ext reset\n"); 370 goto err_deassert_ext; 371 } 372 373 ret = reset_control_deassert(res->phy_reset); 374 if (ret) { 375 dev_err(dev, "cannot deassert phy reset\n"); 376 goto err_deassert_phy; 377 } 378 379 ret = reset_control_deassert(res->pci_reset); 380 if (ret) { 381 dev_err(dev, "cannot deassert pci reset\n"); 382 goto err_deassert_pci; 383 } 384 385 ret = reset_control_deassert(res->por_reset); 386 if (ret) { 387 dev_err(dev, "cannot deassert por reset\n"); 388 goto err_deassert_por; 389 } 390 391 ret = reset_control_deassert(res->axi_reset); 392 if (ret) { 393 dev_err(dev, "cannot deassert axi reset\n"); 394 goto err_deassert_axi; 395 } 396 397 return 0; 398 399 err_deassert_axi: 400 reset_control_assert(res->por_reset); 401 err_deassert_por: 402 reset_control_assert(res->pci_reset); 403 err_deassert_pci: 404 reset_control_assert(res->phy_reset); 405 err_deassert_phy: 406 reset_control_assert(res->ext_reset); 407 err_deassert_ext: 408 reset_control_assert(res->ahb_reset); 409 err_deassert_ahb: 410 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 411 412 return ret; 413 } 414 415 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) 416 { 417 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; 418 struct dw_pcie *pci = pcie->pci; 419 struct device *dev = pci->dev; 420 struct device_node *node = dev->of_node; 421 u32 val; 422 int ret; 423 424 /* enable PCIe clocks and resets */ 425 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 426 val &= ~BIT(0); 427 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 428 429 ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); 430 if (ret) 431 return ret; 432 433 if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || 434 of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { 435 writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | 436 PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | 437 PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), 438 pcie->parf + PCIE20_PARF_PCS_DEEMPH); 439 writel(PCS_SWING_TX_SWING_FULL(120) | 440 PCS_SWING_TX_SWING_LOW(120), 441 pcie->parf + PCIE20_PARF_PCS_SWING); 442 writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); 443 } 444 445 if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { 446 /* set TX termination offset */ 447 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 448 val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; 449 val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); 450 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 451 } 452 453 /* enable external reference clock */ 454 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); 455 /* USE_PAD is required only for ipq806x */ 456 if (!of_device_is_compatible(node, "qcom,pcie-apq8064")) 457 val &= ~PHY_REFCLK_USE_PAD; 458 val |= PHY_REFCLK_SSP_EN; 459 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); 460 461 /* wait for clock acquisition */ 462 usleep_range(1000, 1500); 463 464 /* Set the Max TLP size to 2K, instead of using default of 4K */ 465 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, 466 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); 467 writel(CFG_BRIDGE_SB_INIT, 468 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); 469 470 return 0; 471 } 472 473 static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) 474 { 475 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 476 struct dw_pcie *pci = pcie->pci; 477 struct device *dev = pci->dev; 478 479 res->vdda = devm_regulator_get(dev, "vdda"); 480 if (IS_ERR(res->vdda)) 481 return PTR_ERR(res->vdda); 482 483 res->iface = devm_clk_get(dev, "iface"); 484 if (IS_ERR(res->iface)) 485 return PTR_ERR(res->iface); 486 487 res->aux = devm_clk_get(dev, "aux"); 488 if (IS_ERR(res->aux)) 489 return PTR_ERR(res->aux); 490 491 res->master_bus = devm_clk_get(dev, "master_bus"); 492 if (IS_ERR(res->master_bus)) 493 return PTR_ERR(res->master_bus); 494 495 res->slave_bus = devm_clk_get(dev, "slave_bus"); 496 if (IS_ERR(res->slave_bus)) 497 return PTR_ERR(res->slave_bus); 498 499 res->core = devm_reset_control_get_exclusive(dev, "core"); 500 return PTR_ERR_OR_ZERO(res->core); 501 } 502 503 static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) 504 { 505 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 506 507 reset_control_assert(res->core); 508 clk_disable_unprepare(res->slave_bus); 509 clk_disable_unprepare(res->master_bus); 510 clk_disable_unprepare(res->iface); 511 clk_disable_unprepare(res->aux); 512 regulator_disable(res->vdda); 513 } 514 515 static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) 516 { 517 struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; 518 struct dw_pcie *pci = pcie->pci; 519 struct device *dev = pci->dev; 520 int ret; 521 522 ret = reset_control_deassert(res->core); 523 if (ret) { 524 dev_err(dev, "cannot deassert core reset\n"); 525 return ret; 526 } 527 528 ret = clk_prepare_enable(res->aux); 529 if (ret) { 530 dev_err(dev, "cannot prepare/enable aux clock\n"); 531 goto err_res; 532 } 533 534 ret = clk_prepare_enable(res->iface); 535 if (ret) { 536 dev_err(dev, "cannot prepare/enable iface clock\n"); 537 goto err_aux; 538 } 539 540 ret = clk_prepare_enable(res->master_bus); 541 if (ret) { 542 dev_err(dev, "cannot prepare/enable master_bus clock\n"); 543 goto err_iface; 544 } 545 546 ret = clk_prepare_enable(res->slave_bus); 547 if (ret) { 548 dev_err(dev, "cannot prepare/enable slave_bus clock\n"); 549 goto err_master; 550 } 551 552 ret = regulator_enable(res->vdda); 553 if (ret) { 554 dev_err(dev, "cannot enable vdda regulator\n"); 555 goto err_slave; 556 } 557 558 return 0; 559 err_slave: 560 clk_disable_unprepare(res->slave_bus); 561 err_master: 562 clk_disable_unprepare(res->master_bus); 563 err_iface: 564 clk_disable_unprepare(res->iface); 565 err_aux: 566 clk_disable_unprepare(res->aux); 567 err_res: 568 reset_control_assert(res->core); 569 570 return ret; 571 } 572 573 static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) 574 { 575 /* change DBI base address */ 576 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 577 578 if (IS_ENABLED(CONFIG_PCI_MSI)) { 579 u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 580 581 val |= BIT(31); 582 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 583 } 584 585 return 0; 586 } 587 588 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) 589 { 590 u32 val; 591 592 /* enable link training */ 593 val = readl(pcie->parf + PCIE20_PARF_LTSSM); 594 val |= BIT(8); 595 writel(val, pcie->parf + PCIE20_PARF_LTSSM); 596 } 597 598 static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) 599 { 600 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 601 struct dw_pcie *pci = pcie->pci; 602 struct device *dev = pci->dev; 603 int ret; 604 605 res->supplies[0].supply = "vdda"; 606 res->supplies[1].supply = "vddpe-3v3"; 607 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 608 res->supplies); 609 if (ret) 610 return ret; 611 612 res->aux_clk = devm_clk_get(dev, "aux"); 613 if (IS_ERR(res->aux_clk)) 614 return PTR_ERR(res->aux_clk); 615 616 res->cfg_clk = devm_clk_get(dev, "cfg"); 617 if (IS_ERR(res->cfg_clk)) 618 return PTR_ERR(res->cfg_clk); 619 620 res->master_clk = devm_clk_get(dev, "bus_master"); 621 if (IS_ERR(res->master_clk)) 622 return PTR_ERR(res->master_clk); 623 624 res->slave_clk = devm_clk_get(dev, "bus_slave"); 625 if (IS_ERR(res->slave_clk)) 626 return PTR_ERR(res->slave_clk); 627 628 return 0; 629 } 630 631 static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) 632 { 633 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 634 635 clk_disable_unprepare(res->slave_clk); 636 clk_disable_unprepare(res->master_clk); 637 clk_disable_unprepare(res->cfg_clk); 638 clk_disable_unprepare(res->aux_clk); 639 640 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 641 } 642 643 static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) 644 { 645 struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; 646 struct dw_pcie *pci = pcie->pci; 647 struct device *dev = pci->dev; 648 int ret; 649 650 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 651 if (ret < 0) { 652 dev_err(dev, "cannot enable regulators\n"); 653 return ret; 654 } 655 656 ret = clk_prepare_enable(res->aux_clk); 657 if (ret) { 658 dev_err(dev, "cannot prepare/enable aux clock\n"); 659 goto err_aux_clk; 660 } 661 662 ret = clk_prepare_enable(res->cfg_clk); 663 if (ret) { 664 dev_err(dev, "cannot prepare/enable cfg clock\n"); 665 goto err_cfg_clk; 666 } 667 668 ret = clk_prepare_enable(res->master_clk); 669 if (ret) { 670 dev_err(dev, "cannot prepare/enable master clock\n"); 671 goto err_master_clk; 672 } 673 674 ret = clk_prepare_enable(res->slave_clk); 675 if (ret) { 676 dev_err(dev, "cannot prepare/enable slave clock\n"); 677 goto err_slave_clk; 678 } 679 680 return 0; 681 682 err_slave_clk: 683 clk_disable_unprepare(res->master_clk); 684 err_master_clk: 685 clk_disable_unprepare(res->cfg_clk); 686 err_cfg_clk: 687 clk_disable_unprepare(res->aux_clk); 688 689 err_aux_clk: 690 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 691 692 return ret; 693 } 694 695 static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) 696 { 697 u32 val; 698 699 /* enable PCIe clocks and resets */ 700 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 701 val &= ~BIT(0); 702 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 703 704 /* change DBI base address */ 705 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 706 707 /* MAC PHY_POWERDOWN MUX DISABLE */ 708 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 709 val &= ~BIT(29); 710 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 711 712 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 713 val |= BIT(4); 714 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 715 716 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 717 val |= BIT(31); 718 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 719 720 return 0; 721 } 722 723 static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) 724 { 725 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 726 struct dw_pcie *pci = pcie->pci; 727 struct device *dev = pci->dev; 728 bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019"); 729 int ret; 730 731 res->clks[0].id = "aux"; 732 res->clks[1].id = "master_bus"; 733 res->clks[2].id = "slave_bus"; 734 res->clks[3].id = "iface"; 735 736 /* qcom,pcie-ipq4019 is defined without "iface" */ 737 res->num_clks = is_ipq ? 3 : 4; 738 739 ret = devm_clk_bulk_get(dev, res->num_clks, res->clks); 740 if (ret < 0) 741 return ret; 742 743 res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); 744 if (IS_ERR(res->axi_m_reset)) 745 return PTR_ERR(res->axi_m_reset); 746 747 res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); 748 if (IS_ERR(res->axi_s_reset)) 749 return PTR_ERR(res->axi_s_reset); 750 751 if (is_ipq) { 752 /* 753 * These resources relates to the PHY or are secure clocks, but 754 * are controlled here for IPQ4019 755 */ 756 res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); 757 if (IS_ERR(res->pipe_reset)) 758 return PTR_ERR(res->pipe_reset); 759 760 res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, 761 "axi_m_vmid"); 762 if (IS_ERR(res->axi_m_vmid_reset)) 763 return PTR_ERR(res->axi_m_vmid_reset); 764 765 res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, 766 "axi_s_xpu"); 767 if (IS_ERR(res->axi_s_xpu_reset)) 768 return PTR_ERR(res->axi_s_xpu_reset); 769 770 res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); 771 if (IS_ERR(res->parf_reset)) 772 return PTR_ERR(res->parf_reset); 773 774 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); 775 if (IS_ERR(res->phy_reset)) 776 return PTR_ERR(res->phy_reset); 777 } 778 779 res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, 780 "axi_m_sticky"); 781 if (IS_ERR(res->axi_m_sticky_reset)) 782 return PTR_ERR(res->axi_m_sticky_reset); 783 784 res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, 785 "pipe_sticky"); 786 if (IS_ERR(res->pipe_sticky_reset)) 787 return PTR_ERR(res->pipe_sticky_reset); 788 789 res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); 790 if (IS_ERR(res->pwr_reset)) 791 return PTR_ERR(res->pwr_reset); 792 793 res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); 794 if (IS_ERR(res->ahb_reset)) 795 return PTR_ERR(res->ahb_reset); 796 797 if (is_ipq) { 798 res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); 799 if (IS_ERR(res->phy_ahb_reset)) 800 return PTR_ERR(res->phy_ahb_reset); 801 } 802 803 return 0; 804 } 805 806 static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) 807 { 808 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 809 810 reset_control_assert(res->axi_m_reset); 811 reset_control_assert(res->axi_s_reset); 812 reset_control_assert(res->pipe_reset); 813 reset_control_assert(res->pipe_sticky_reset); 814 reset_control_assert(res->phy_reset); 815 reset_control_assert(res->phy_ahb_reset); 816 reset_control_assert(res->axi_m_sticky_reset); 817 reset_control_assert(res->pwr_reset); 818 reset_control_assert(res->ahb_reset); 819 clk_bulk_disable_unprepare(res->num_clks, res->clks); 820 } 821 822 static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) 823 { 824 struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; 825 struct dw_pcie *pci = pcie->pci; 826 struct device *dev = pci->dev; 827 int ret; 828 829 ret = reset_control_assert(res->axi_m_reset); 830 if (ret) { 831 dev_err(dev, "cannot assert axi master reset\n"); 832 return ret; 833 } 834 835 ret = reset_control_assert(res->axi_s_reset); 836 if (ret) { 837 dev_err(dev, "cannot assert axi slave reset\n"); 838 return ret; 839 } 840 841 usleep_range(10000, 12000); 842 843 ret = reset_control_assert(res->pipe_reset); 844 if (ret) { 845 dev_err(dev, "cannot assert pipe reset\n"); 846 return ret; 847 } 848 849 ret = reset_control_assert(res->pipe_sticky_reset); 850 if (ret) { 851 dev_err(dev, "cannot assert pipe sticky reset\n"); 852 return ret; 853 } 854 855 ret = reset_control_assert(res->phy_reset); 856 if (ret) { 857 dev_err(dev, "cannot assert phy reset\n"); 858 return ret; 859 } 860 861 ret = reset_control_assert(res->phy_ahb_reset); 862 if (ret) { 863 dev_err(dev, "cannot assert phy ahb reset\n"); 864 return ret; 865 } 866 867 usleep_range(10000, 12000); 868 869 ret = reset_control_assert(res->axi_m_sticky_reset); 870 if (ret) { 871 dev_err(dev, "cannot assert axi master sticky reset\n"); 872 return ret; 873 } 874 875 ret = reset_control_assert(res->pwr_reset); 876 if (ret) { 877 dev_err(dev, "cannot assert power reset\n"); 878 return ret; 879 } 880 881 ret = reset_control_assert(res->ahb_reset); 882 if (ret) { 883 dev_err(dev, "cannot assert ahb reset\n"); 884 return ret; 885 } 886 887 usleep_range(10000, 12000); 888 889 ret = reset_control_deassert(res->phy_ahb_reset); 890 if (ret) { 891 dev_err(dev, "cannot deassert phy ahb reset\n"); 892 return ret; 893 } 894 895 ret = reset_control_deassert(res->phy_reset); 896 if (ret) { 897 dev_err(dev, "cannot deassert phy reset\n"); 898 goto err_rst_phy; 899 } 900 901 ret = reset_control_deassert(res->pipe_reset); 902 if (ret) { 903 dev_err(dev, "cannot deassert pipe reset\n"); 904 goto err_rst_pipe; 905 } 906 907 ret = reset_control_deassert(res->pipe_sticky_reset); 908 if (ret) { 909 dev_err(dev, "cannot deassert pipe sticky reset\n"); 910 goto err_rst_pipe_sticky; 911 } 912 913 usleep_range(10000, 12000); 914 915 ret = reset_control_deassert(res->axi_m_reset); 916 if (ret) { 917 dev_err(dev, "cannot deassert axi master reset\n"); 918 goto err_rst_axi_m; 919 } 920 921 ret = reset_control_deassert(res->axi_m_sticky_reset); 922 if (ret) { 923 dev_err(dev, "cannot deassert axi master sticky reset\n"); 924 goto err_rst_axi_m_sticky; 925 } 926 927 ret = reset_control_deassert(res->axi_s_reset); 928 if (ret) { 929 dev_err(dev, "cannot deassert axi slave reset\n"); 930 goto err_rst_axi_s; 931 } 932 933 ret = reset_control_deassert(res->pwr_reset); 934 if (ret) { 935 dev_err(dev, "cannot deassert power reset\n"); 936 goto err_rst_pwr; 937 } 938 939 ret = reset_control_deassert(res->ahb_reset); 940 if (ret) { 941 dev_err(dev, "cannot deassert ahb reset\n"); 942 goto err_rst_ahb; 943 } 944 945 usleep_range(10000, 12000); 946 947 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 948 if (ret) 949 goto err_clks; 950 951 return 0; 952 953 err_clks: 954 reset_control_assert(res->ahb_reset); 955 err_rst_ahb: 956 reset_control_assert(res->pwr_reset); 957 err_rst_pwr: 958 reset_control_assert(res->axi_s_reset); 959 err_rst_axi_s: 960 reset_control_assert(res->axi_m_sticky_reset); 961 err_rst_axi_m_sticky: 962 reset_control_assert(res->axi_m_reset); 963 err_rst_axi_m: 964 reset_control_assert(res->pipe_sticky_reset); 965 err_rst_pipe_sticky: 966 reset_control_assert(res->pipe_reset); 967 err_rst_pipe: 968 reset_control_assert(res->phy_reset); 969 err_rst_phy: 970 reset_control_assert(res->phy_ahb_reset); 971 return ret; 972 } 973 974 static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie) 975 { 976 u32 val; 977 978 /* enable PCIe clocks and resets */ 979 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 980 val &= ~BIT(0); 981 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 982 983 /* change DBI base address */ 984 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 985 986 /* MAC PHY_POWERDOWN MUX DISABLE */ 987 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 988 val &= ~BIT(29); 989 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 990 991 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 992 val |= BIT(4); 993 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 994 995 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 996 val |= BIT(31); 997 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); 998 999 return 0; 1000 } 1001 1002 static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) 1003 { 1004 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1005 struct dw_pcie *pci = pcie->pci; 1006 struct device *dev = pci->dev; 1007 int i; 1008 const char *rst_names[] = { "axi_m", "axi_s", "pipe", 1009 "axi_m_sticky", "sticky", 1010 "ahb", "sleep", }; 1011 1012 res->iface = devm_clk_get(dev, "iface"); 1013 if (IS_ERR(res->iface)) 1014 return PTR_ERR(res->iface); 1015 1016 res->axi_m_clk = devm_clk_get(dev, "axi_m"); 1017 if (IS_ERR(res->axi_m_clk)) 1018 return PTR_ERR(res->axi_m_clk); 1019 1020 res->axi_s_clk = devm_clk_get(dev, "axi_s"); 1021 if (IS_ERR(res->axi_s_clk)) 1022 return PTR_ERR(res->axi_s_clk); 1023 1024 res->ahb_clk = devm_clk_get(dev, "ahb"); 1025 if (IS_ERR(res->ahb_clk)) 1026 return PTR_ERR(res->ahb_clk); 1027 1028 res->aux_clk = devm_clk_get(dev, "aux"); 1029 if (IS_ERR(res->aux_clk)) 1030 return PTR_ERR(res->aux_clk); 1031 1032 for (i = 0; i < ARRAY_SIZE(rst_names); i++) { 1033 res->rst[i] = devm_reset_control_get(dev, rst_names[i]); 1034 if (IS_ERR(res->rst[i])) 1035 return PTR_ERR(res->rst[i]); 1036 } 1037 1038 return 0; 1039 } 1040 1041 static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) 1042 { 1043 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1044 1045 clk_disable_unprepare(res->iface); 1046 clk_disable_unprepare(res->axi_m_clk); 1047 clk_disable_unprepare(res->axi_s_clk); 1048 clk_disable_unprepare(res->ahb_clk); 1049 clk_disable_unprepare(res->aux_clk); 1050 } 1051 1052 static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) 1053 { 1054 struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; 1055 struct dw_pcie *pci = pcie->pci; 1056 struct device *dev = pci->dev; 1057 int i, ret; 1058 1059 for (i = 0; i < ARRAY_SIZE(res->rst); i++) { 1060 ret = reset_control_assert(res->rst[i]); 1061 if (ret) { 1062 dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); 1063 return ret; 1064 } 1065 } 1066 1067 usleep_range(2000, 2500); 1068 1069 for (i = 0; i < ARRAY_SIZE(res->rst); i++) { 1070 ret = reset_control_deassert(res->rst[i]); 1071 if (ret) { 1072 dev_err(dev, "reset #%d deassert failed (%d)\n", i, 1073 ret); 1074 return ret; 1075 } 1076 } 1077 1078 /* 1079 * Don't have a way to see if the reset has completed. 1080 * Wait for some time. 1081 */ 1082 usleep_range(2000, 2500); 1083 1084 ret = clk_prepare_enable(res->iface); 1085 if (ret) { 1086 dev_err(dev, "cannot prepare/enable core clock\n"); 1087 goto err_clk_iface; 1088 } 1089 1090 ret = clk_prepare_enable(res->axi_m_clk); 1091 if (ret) { 1092 dev_err(dev, "cannot prepare/enable core clock\n"); 1093 goto err_clk_axi_m; 1094 } 1095 1096 ret = clk_prepare_enable(res->axi_s_clk); 1097 if (ret) { 1098 dev_err(dev, "cannot prepare/enable axi slave clock\n"); 1099 goto err_clk_axi_s; 1100 } 1101 1102 ret = clk_prepare_enable(res->ahb_clk); 1103 if (ret) { 1104 dev_err(dev, "cannot prepare/enable ahb clock\n"); 1105 goto err_clk_ahb; 1106 } 1107 1108 ret = clk_prepare_enable(res->aux_clk); 1109 if (ret) { 1110 dev_err(dev, "cannot prepare/enable aux clock\n"); 1111 goto err_clk_aux; 1112 } 1113 1114 return 0; 1115 1116 err_clk_aux: 1117 clk_disable_unprepare(res->ahb_clk); 1118 err_clk_ahb: 1119 clk_disable_unprepare(res->axi_s_clk); 1120 err_clk_axi_s: 1121 clk_disable_unprepare(res->axi_m_clk); 1122 err_clk_axi_m: 1123 clk_disable_unprepare(res->iface); 1124 err_clk_iface: 1125 /* 1126 * Not checking for failure, will anyway return 1127 * the original failure in 'ret'. 1128 */ 1129 for (i = 0; i < ARRAY_SIZE(res->rst); i++) 1130 reset_control_assert(res->rst[i]); 1131 1132 return ret; 1133 } 1134 1135 static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) 1136 { 1137 struct dw_pcie *pci = pcie->pci; 1138 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1139 u32 val; 1140 1141 writel(SLV_ADDR_SPACE_SZ, 1142 pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); 1143 1144 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1145 val &= ~BIT(0); 1146 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1147 1148 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1149 1150 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS 1151 | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 1152 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 1153 pcie->parf + PCIE20_PARF_SYS_CTRL); 1154 writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); 1155 1156 writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); 1157 writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); 1158 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 1159 1160 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 1161 val &= ~PCI_EXP_LNKCAP_ASPMS; 1162 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 1163 1164 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 1165 PCI_EXP_DEVCTL2); 1166 1167 return 0; 1168 } 1169 1170 static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie) 1171 { 1172 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1173 struct dw_pcie *pci = pcie->pci; 1174 struct device *dev = pci->dev; 1175 unsigned int num_clks, num_opt_clks; 1176 unsigned int idx; 1177 int ret; 1178 1179 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); 1180 if (IS_ERR(res->pci_reset)) 1181 return PTR_ERR(res->pci_reset); 1182 1183 res->supplies[0].supply = "vdda"; 1184 res->supplies[1].supply = "vddpe-3v3"; 1185 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), 1186 res->supplies); 1187 if (ret) 1188 return ret; 1189 1190 idx = 0; 1191 res->clks[idx++].id = "aux"; 1192 res->clks[idx++].id = "cfg"; 1193 res->clks[idx++].id = "bus_master"; 1194 res->clks[idx++].id = "bus_slave"; 1195 res->clks[idx++].id = "slave_q2a"; 1196 1197 num_clks = idx; 1198 1199 ret = devm_clk_bulk_get(dev, num_clks, res->clks); 1200 if (ret < 0) 1201 return ret; 1202 1203 res->clks[idx++].id = "tbu"; 1204 res->clks[idx++].id = "ddrss_sf_tbu"; 1205 res->clks[idx++].id = "aggre0"; 1206 res->clks[idx++].id = "aggre1"; 1207 res->clks[idx++].id = "noc_aggr_4"; 1208 res->clks[idx++].id = "noc_aggr_south_sf"; 1209 res->clks[idx++].id = "cnoc_qx"; 1210 1211 num_opt_clks = idx - num_clks; 1212 res->num_clks = idx; 1213 1214 ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks); 1215 if (ret < 0) 1216 return ret; 1217 1218 return 0; 1219 } 1220 1221 static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie) 1222 { 1223 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1224 struct dw_pcie *pci = pcie->pci; 1225 struct device *dev = pci->dev; 1226 u32 val; 1227 int ret; 1228 1229 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); 1230 if (ret < 0) { 1231 dev_err(dev, "cannot enable regulators\n"); 1232 return ret; 1233 } 1234 1235 ret = clk_bulk_prepare_enable(res->num_clks, res->clks); 1236 if (ret < 0) 1237 goto err_disable_regulators; 1238 1239 ret = reset_control_assert(res->pci_reset); 1240 if (ret < 0) { 1241 dev_err(dev, "cannot assert pci reset\n"); 1242 goto err_disable_clocks; 1243 } 1244 1245 usleep_range(1000, 1500); 1246 1247 ret = reset_control_deassert(res->pci_reset); 1248 if (ret < 0) { 1249 dev_err(dev, "cannot deassert pci reset\n"); 1250 goto err_disable_clocks; 1251 } 1252 1253 /* Wait for reset to complete, required on SM8450 */ 1254 usleep_range(1000, 1500); 1255 1256 /* configure PCIe to RC mode */ 1257 writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); 1258 1259 /* enable PCIe clocks and resets */ 1260 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1261 val &= ~BIT(0); 1262 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1263 1264 /* change DBI base address */ 1265 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1266 1267 /* MAC PHY_POWERDOWN MUX DISABLE */ 1268 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); 1269 val &= ~BIT(29); 1270 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); 1271 1272 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1273 val |= BIT(4); 1274 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1275 1276 /* Enable L1 and L1SS */ 1277 val = readl(pcie->parf + PCIE20_PARF_PM_CTRL); 1278 val &= ~REQ_NOT_ENTR_L1; 1279 writel(val, pcie->parf + PCIE20_PARF_PM_CTRL); 1280 1281 if (IS_ENABLED(CONFIG_PCI_MSI)) { 1282 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 1283 val |= BIT(31); 1284 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); 1285 } 1286 1287 return 0; 1288 err_disable_clocks: 1289 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1290 err_disable_regulators: 1291 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1292 1293 return ret; 1294 } 1295 1296 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) 1297 { 1298 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 1299 1300 clk_bulk_disable_unprepare(res->num_clks, res->clks); 1301 1302 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 1303 } 1304 1305 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie) 1306 { 1307 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1308 struct dw_pcie *pci = pcie->pci; 1309 struct device *dev = pci->dev; 1310 int ret; 1311 1312 res->clks[0].id = "iface"; 1313 res->clks[1].id = "axi_m"; 1314 res->clks[2].id = "axi_s"; 1315 res->clks[3].id = "axi_bridge"; 1316 res->clks[4].id = "rchng"; 1317 1318 ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks); 1319 if (ret < 0) 1320 return ret; 1321 1322 res->rst = devm_reset_control_array_get_exclusive(dev); 1323 if (IS_ERR(res->rst)) 1324 return PTR_ERR(res->rst); 1325 1326 return 0; 1327 } 1328 1329 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie) 1330 { 1331 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1332 1333 clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); 1334 } 1335 1336 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie) 1337 { 1338 struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0; 1339 struct device *dev = pcie->pci->dev; 1340 int ret; 1341 1342 ret = reset_control_assert(res->rst); 1343 if (ret) { 1344 dev_err(dev, "reset assert failed (%d)\n", ret); 1345 return ret; 1346 } 1347 1348 /* 1349 * Delay periods before and after reset deassert are working values 1350 * from downstream Codeaurora kernel 1351 */ 1352 usleep_range(2000, 2500); 1353 1354 ret = reset_control_deassert(res->rst); 1355 if (ret) { 1356 dev_err(dev, "reset deassert failed (%d)\n", ret); 1357 return ret; 1358 } 1359 1360 usleep_range(2000, 2500); 1361 1362 return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); 1363 } 1364 1365 static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) 1366 { 1367 struct dw_pcie *pci = pcie->pci; 1368 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1369 u32 val; 1370 int i; 1371 1372 writel(SLV_ADDR_SPACE_SZ, 1373 pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); 1374 1375 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); 1376 val &= ~BIT(0); 1377 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); 1378 1379 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); 1380 1381 writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE); 1382 writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN, 1383 pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); 1384 writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS | 1385 GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL, 1386 pci->dbi_base + GEN3_RELATED_OFF); 1387 1388 writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | 1389 SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | 1390 AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, 1391 pcie->parf + PCIE20_PARF_SYS_CTRL); 1392 1393 writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); 1394 1395 dw_pcie_dbi_ro_wr_en(pci); 1396 writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); 1397 1398 val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); 1399 val &= ~PCI_EXP_LNKCAP_ASPMS; 1400 writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); 1401 1402 writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + 1403 PCI_EXP_DEVCTL2); 1404 1405 for (i = 0; i < 256; i++) 1406 writel(0, pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N + (4 * i)); 1407 1408 return 0; 1409 } 1410 1411 static int qcom_pcie_link_up(struct dw_pcie *pci) 1412 { 1413 u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1414 u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1415 1416 return !!(val & PCI_EXP_LNKSTA_DLLLA); 1417 } 1418 1419 static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie) 1420 { 1421 /* iommu map structure */ 1422 struct { 1423 u32 bdf; 1424 u32 phandle; 1425 u32 smmu_sid; 1426 u32 smmu_sid_len; 1427 } *map; 1428 void __iomem *bdf_to_sid_base = pcie->parf + PCIE20_PARF_BDF_TO_SID_TABLE_N; 1429 struct device *dev = pcie->pci->dev; 1430 u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; 1431 int i, nr_map, size = 0; 1432 u32 smmu_sid_base; 1433 1434 of_get_property(dev->of_node, "iommu-map", &size); 1435 if (!size) 1436 return 0; 1437 1438 map = kzalloc(size, GFP_KERNEL); 1439 if (!map) 1440 return -ENOMEM; 1441 1442 of_property_read_u32_array(dev->of_node, 1443 "iommu-map", (u32 *)map, size / sizeof(u32)); 1444 1445 nr_map = size / (sizeof(*map)); 1446 1447 crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); 1448 1449 /* Registers need to be zero out first */ 1450 memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); 1451 1452 /* Extract the SMMU SID base from the first entry of iommu-map */ 1453 smmu_sid_base = map[0].smmu_sid; 1454 1455 /* Look for an available entry to hold the mapping */ 1456 for (i = 0; i < nr_map; i++) { 1457 __be16 bdf_be = cpu_to_be16(map[i].bdf); 1458 u32 val; 1459 u8 hash; 1460 1461 hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 1462 0); 1463 1464 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1465 1466 /* If the register is already populated, look for next available entry */ 1467 while (val) { 1468 u8 current_hash = hash++; 1469 u8 next_mask = 0xff; 1470 1471 /* If NEXT field is NULL then update it with next hash */ 1472 if (!(val & next_mask)) { 1473 val |= (u32)hash; 1474 writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); 1475 } 1476 1477 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1478 } 1479 1480 /* BDF [31:16] | SID [15:8] | NEXT [7:0] */ 1481 val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0; 1482 writel(val, bdf_to_sid_base + hash * sizeof(u32)); 1483 } 1484 1485 kfree(map); 1486 1487 return 0; 1488 } 1489 1490 static int qcom_pcie_host_init(struct dw_pcie_rp *pp) 1491 { 1492 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1493 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1494 int ret; 1495 1496 qcom_ep_reset_assert(pcie); 1497 1498 ret = pcie->cfg->ops->init(pcie); 1499 if (ret) 1500 return ret; 1501 1502 ret = phy_power_on(pcie->phy); 1503 if (ret) 1504 goto err_deinit; 1505 1506 if (pcie->cfg->ops->post_init) { 1507 ret = pcie->cfg->ops->post_init(pcie); 1508 if (ret) 1509 goto err_disable_phy; 1510 } 1511 1512 qcom_ep_reset_deassert(pcie); 1513 1514 if (pcie->cfg->ops->config_sid) { 1515 ret = pcie->cfg->ops->config_sid(pcie); 1516 if (ret) 1517 goto err_assert_reset; 1518 } 1519 1520 return 0; 1521 1522 err_assert_reset: 1523 qcom_ep_reset_assert(pcie); 1524 err_disable_phy: 1525 phy_power_off(pcie->phy); 1526 err_deinit: 1527 pcie->cfg->ops->deinit(pcie); 1528 1529 return ret; 1530 } 1531 1532 static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { 1533 .host_init = qcom_pcie_host_init, 1534 }; 1535 1536 /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ 1537 static const struct qcom_pcie_ops ops_2_1_0 = { 1538 .get_resources = qcom_pcie_get_resources_2_1_0, 1539 .init = qcom_pcie_init_2_1_0, 1540 .post_init = qcom_pcie_post_init_2_1_0, 1541 .deinit = qcom_pcie_deinit_2_1_0, 1542 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1543 }; 1544 1545 /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ 1546 static const struct qcom_pcie_ops ops_1_0_0 = { 1547 .get_resources = qcom_pcie_get_resources_1_0_0, 1548 .init = qcom_pcie_init_1_0_0, 1549 .post_init = qcom_pcie_post_init_1_0_0, 1550 .deinit = qcom_pcie_deinit_1_0_0, 1551 .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, 1552 }; 1553 1554 /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ 1555 static const struct qcom_pcie_ops ops_2_3_2 = { 1556 .get_resources = qcom_pcie_get_resources_2_3_2, 1557 .init = qcom_pcie_init_2_3_2, 1558 .post_init = qcom_pcie_post_init_2_3_2, 1559 .deinit = qcom_pcie_deinit_2_3_2, 1560 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1561 }; 1562 1563 /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ 1564 static const struct qcom_pcie_ops ops_2_4_0 = { 1565 .get_resources = qcom_pcie_get_resources_2_4_0, 1566 .init = qcom_pcie_init_2_4_0, 1567 .post_init = qcom_pcie_post_init_2_4_0, 1568 .deinit = qcom_pcie_deinit_2_4_0, 1569 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1570 }; 1571 1572 /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ 1573 static const struct qcom_pcie_ops ops_2_3_3 = { 1574 .get_resources = qcom_pcie_get_resources_2_3_3, 1575 .init = qcom_pcie_init_2_3_3, 1576 .post_init = qcom_pcie_post_init_2_3_3, 1577 .deinit = qcom_pcie_deinit_2_3_3, 1578 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1579 }; 1580 1581 /* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */ 1582 static const struct qcom_pcie_ops ops_2_7_0 = { 1583 .get_resources = qcom_pcie_get_resources_2_7_0, 1584 .init = qcom_pcie_init_2_7_0, 1585 .deinit = qcom_pcie_deinit_2_7_0, 1586 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1587 }; 1588 1589 /* Qcom IP rev.: 1.9.0 */ 1590 static const struct qcom_pcie_ops ops_1_9_0 = { 1591 .get_resources = qcom_pcie_get_resources_2_7_0, 1592 .init = qcom_pcie_init_2_7_0, 1593 .deinit = qcom_pcie_deinit_2_7_0, 1594 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1595 .config_sid = qcom_pcie_config_sid_sm8250, 1596 }; 1597 1598 /* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */ 1599 static const struct qcom_pcie_ops ops_2_9_0 = { 1600 .get_resources = qcom_pcie_get_resources_2_9_0, 1601 .init = qcom_pcie_init_2_9_0, 1602 .post_init = qcom_pcie_post_init_2_9_0, 1603 .deinit = qcom_pcie_deinit_2_9_0, 1604 .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, 1605 }; 1606 1607 static const struct qcom_pcie_cfg cfg_1_0_0 = { 1608 .ops = &ops_1_0_0, 1609 }; 1610 1611 static const struct qcom_pcie_cfg cfg_1_9_0 = { 1612 .ops = &ops_1_9_0, 1613 }; 1614 1615 static const struct qcom_pcie_cfg cfg_2_1_0 = { 1616 .ops = &ops_2_1_0, 1617 }; 1618 1619 static const struct qcom_pcie_cfg cfg_2_3_2 = { 1620 .ops = &ops_2_3_2, 1621 }; 1622 1623 static const struct qcom_pcie_cfg cfg_2_3_3 = { 1624 .ops = &ops_2_3_3, 1625 }; 1626 1627 static const struct qcom_pcie_cfg cfg_2_4_0 = { 1628 .ops = &ops_2_4_0, 1629 }; 1630 1631 static const struct qcom_pcie_cfg cfg_2_7_0 = { 1632 .ops = &ops_2_7_0, 1633 }; 1634 1635 static const struct qcom_pcie_cfg cfg_2_9_0 = { 1636 .ops = &ops_2_9_0, 1637 }; 1638 1639 static const struct dw_pcie_ops dw_pcie_ops = { 1640 .link_up = qcom_pcie_link_up, 1641 .start_link = qcom_pcie_start_link, 1642 }; 1643 1644 static int qcom_pcie_icc_init(struct qcom_pcie *pcie) 1645 { 1646 struct dw_pcie *pci = pcie->pci; 1647 int ret; 1648 1649 pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem"); 1650 if (IS_ERR(pcie->icc_mem)) 1651 return PTR_ERR(pcie->icc_mem); 1652 1653 /* 1654 * Some Qualcomm platforms require interconnect bandwidth constraints 1655 * to be set before enabling interconnect clocks. 1656 * 1657 * Set an initial peak bandwidth corresponding to single-lane Gen 1 1658 * for the pcie-mem path. 1659 */ 1660 ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250)); 1661 if (ret) { 1662 dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", 1663 ret); 1664 return ret; 1665 } 1666 1667 return 0; 1668 } 1669 1670 static void qcom_pcie_icc_update(struct qcom_pcie *pcie) 1671 { 1672 struct dw_pcie *pci = pcie->pci; 1673 u32 offset, status, bw; 1674 int speed, width; 1675 int ret; 1676 1677 if (!pcie->icc_mem) 1678 return; 1679 1680 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); 1681 status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); 1682 1683 /* Only update constraints if link is up. */ 1684 if (!(status & PCI_EXP_LNKSTA_DLLLA)) 1685 return; 1686 1687 speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status); 1688 width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status); 1689 1690 switch (speed) { 1691 case 1: 1692 bw = MBps_to_icc(250); 1693 break; 1694 case 2: 1695 bw = MBps_to_icc(500); 1696 break; 1697 default: 1698 WARN_ON_ONCE(1); 1699 fallthrough; 1700 case 3: 1701 bw = MBps_to_icc(985); 1702 break; 1703 } 1704 1705 ret = icc_set_bw(pcie->icc_mem, 0, width * bw); 1706 if (ret) { 1707 dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n", 1708 ret); 1709 } 1710 } 1711 1712 static int qcom_pcie_probe(struct platform_device *pdev) 1713 { 1714 struct device *dev = &pdev->dev; 1715 struct dw_pcie_rp *pp; 1716 struct dw_pcie *pci; 1717 struct qcom_pcie *pcie; 1718 const struct qcom_pcie_cfg *pcie_cfg; 1719 int ret; 1720 1721 pcie_cfg = of_device_get_match_data(dev); 1722 if (!pcie_cfg || !pcie_cfg->ops) { 1723 dev_err(dev, "Invalid platform data\n"); 1724 return -EINVAL; 1725 } 1726 1727 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); 1728 if (!pcie) 1729 return -ENOMEM; 1730 1731 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); 1732 if (!pci) 1733 return -ENOMEM; 1734 1735 pm_runtime_enable(dev); 1736 ret = pm_runtime_get_sync(dev); 1737 if (ret < 0) 1738 goto err_pm_runtime_put; 1739 1740 pci->dev = dev; 1741 pci->ops = &dw_pcie_ops; 1742 pp = &pci->pp; 1743 1744 pcie->pci = pci; 1745 1746 pcie->cfg = pcie_cfg; 1747 1748 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); 1749 if (IS_ERR(pcie->reset)) { 1750 ret = PTR_ERR(pcie->reset); 1751 goto err_pm_runtime_put; 1752 } 1753 1754 pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); 1755 if (IS_ERR(pcie->parf)) { 1756 ret = PTR_ERR(pcie->parf); 1757 goto err_pm_runtime_put; 1758 } 1759 1760 pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); 1761 if (IS_ERR(pcie->elbi)) { 1762 ret = PTR_ERR(pcie->elbi); 1763 goto err_pm_runtime_put; 1764 } 1765 1766 pcie->phy = devm_phy_optional_get(dev, "pciephy"); 1767 if (IS_ERR(pcie->phy)) { 1768 ret = PTR_ERR(pcie->phy); 1769 goto err_pm_runtime_put; 1770 } 1771 1772 ret = qcom_pcie_icc_init(pcie); 1773 if (ret) 1774 goto err_pm_runtime_put; 1775 1776 ret = pcie->cfg->ops->get_resources(pcie); 1777 if (ret) 1778 goto err_pm_runtime_put; 1779 1780 pp->ops = &qcom_pcie_dw_ops; 1781 1782 ret = phy_init(pcie->phy); 1783 if (ret) 1784 goto err_pm_runtime_put; 1785 1786 platform_set_drvdata(pdev, pcie); 1787 1788 ret = dw_pcie_host_init(pp); 1789 if (ret) { 1790 dev_err(dev, "cannot initialize host\n"); 1791 goto err_phy_exit; 1792 } 1793 1794 qcom_pcie_icc_update(pcie); 1795 1796 return 0; 1797 1798 err_phy_exit: 1799 phy_exit(pcie->phy); 1800 err_pm_runtime_put: 1801 pm_runtime_put(dev); 1802 pm_runtime_disable(dev); 1803 1804 return ret; 1805 } 1806 1807 static const struct of_device_id qcom_pcie_match[] = { 1808 { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 }, 1809 { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, 1810 { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, 1811 { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, 1812 { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, 1813 { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, 1814 { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 }, 1815 { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, 1816 { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, 1817 { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 }, 1818 { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, 1819 { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 }, 1820 { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 }, 1821 { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 }, 1822 { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 }, 1823 { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 }, 1824 { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 }, 1825 { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 }, 1826 { } 1827 }; 1828 1829 static void qcom_fixup_class(struct pci_dev *dev) 1830 { 1831 dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; 1832 } 1833 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class); 1834 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class); 1835 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class); 1836 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class); 1837 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class); 1838 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class); 1839 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class); 1840 1841 static struct platform_driver qcom_pcie_driver = { 1842 .probe = qcom_pcie_probe, 1843 .driver = { 1844 .name = "qcom-pcie", 1845 .suppress_bind_attrs = true, 1846 .of_match_table = qcom_pcie_match, 1847 }, 1848 }; 1849 builtin_platform_driver(qcom_pcie_driver); 1850