// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3		0x16c /* Register offset specific to IP ver 2.3.3 */
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_SLV_ADDR_SPACE_SIZE		0x358
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x10000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

#define QCOM_PCIE_1_0_0_MAX_CLOCKS		4
struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_CLOCKS		5
#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_CLOCKS		4
#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_CLOCKS		5
#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS		4
#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
};

#define QCOM_PCIE_2_7_0_MAX_CLOCKS		15
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
	struct reset_control *rst;
};

#define QCOM_PCIE_2_9_0_MAX_CLOCKS		5
struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
	struct reset_control *rst;
};

union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	void __iomem *mhi;
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct icc_path *icc_mem;
	const struct qcom_pcie_cfg *cfg;
	struct dentry *debugfs;
	bool suspended;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";

	/* ext is optional on APQ8016 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* reset the PCIe interface as uboot can leave it in an undefined state */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->clks[0].id = "iface";
	res->clks[1].id = "aux";
	res->clks[2].id = "master_bus";
	res->clks[3].id = "slave_bus";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= LTSSM_EN;
	writel(val, pcie->parf + PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";

	res->num_resets = is_ipq ? 12 : 6;

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "ahb";
	res->clks[4].id = "aux";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	unsigned int num_clks, num_opt_clks;
	unsigned int idx;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	idx = 0;
	res->clks[idx++].id = "aux";
	res->clks[idx++].id = "cfg";
	res->clks[idx++].id = "bus_master";
	res->clks[idx++].id = "bus_slave";
	res->clks[idx++].id = "slave_q2a";

	num_clks = idx;

	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->clks[idx++].id = "tbu";
	res->clks[idx++].id = "ddrss_sf_tbu";
	res->clks[idx++].id = "aggre0";
	res->clks[idx++].id = "aggre1";
	res->clks[idx++].id = "noc_aggr";
	res->clks[idx++].id = "noc_aggr_4";
	res->clks[idx++].id = "noc_aggr_south_sf";
	res->clks[idx++].id = "cnoc_qx";
	res->clks[idx++].id = "sleep";
	res->clks[idx++].id = "cnoc_sf_axi";

	num_opt_clks = idx - num_clks;
	res->num_clks = idx;

	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
	if (ret < 0)
		return ret;

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
dev_err(dev, "reset deassert failed (%d)\n", ret); 916 goto err_disable_clocks; 917 } 918 919 /* Wait for reset to complete, required on SM8450 */ 920 usleep_range(1000, 1500); 921 922 /* configure PCIe to RC mode */ 923 writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE); 924 925 /* enable PCIe clocks and resets */ 926 val = readl(pcie->parf + PARF_PHY_CTRL); 927 val &= ~PHY_TEST_PWR_DOWN; 928 writel(val, pcie->parf + PARF_PHY_CTRL); 929 930 /* change DBI base address */ 931 writel(0, pcie->parf + PARF_DBI_BASE_ADDR); 932 933 /* MAC PHY_POWERDOWN MUX DISABLE */ 934 val = readl(pcie->parf + PARF_SYS_CTRL); 935 val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; 936 writel(val, pcie->parf + PARF_SYS_CTRL); 937 938 val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 939 val |= BYPASS; 940 writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); 941 942 /* Enable L1 and L1SS */ 943 val = readl(pcie->parf + PARF_PM_CTRL); 944 val &= ~REQ_NOT_ENTR_L1; 945 writel(val, pcie->parf + PARF_PM_CTRL); 946 947 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 948 val |= EN; 949 writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); 950 951 return 0; 952 err_disable_clocks: 953 clk_bulk_disable_unprepare(res->num_clks, res->clks); 954 err_disable_regulators: 955 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 956 957 return ret; 958 } 959 960 static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) 961 { 962 qcom_pcie_clear_hpc(pcie->pci); 963 964 return 0; 965 } 966 967 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) 968 { 969 struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; 970 971 clk_bulk_disable_unprepare(res->num_clks, res->clks); 972 973 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); 974 } 975 976 static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie) 977 { 978 /* iommu map structure */ 979 struct { 980 u32 bdf; 981 u32 phandle; 982 u32 smmu_sid; 983 u32 smmu_sid_len; 984 } *map; 985 void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N; 986 struct device *dev = pcie->pci->dev; 987 u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE]; 988 int i, nr_map, size = 0; 989 u32 smmu_sid_base; 990 991 of_get_property(dev->of_node, "iommu-map", &size); 992 if (!size) 993 return 0; 994 995 map = kzalloc(size, GFP_KERNEL); 996 if (!map) 997 return -ENOMEM; 998 999 of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map, 1000 size / sizeof(u32)); 1001 1002 nr_map = size / (sizeof(*map)); 1003 1004 crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL); 1005 1006 /* Registers need to be zero out first */ 1007 memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32)); 1008 1009 /* Extract the SMMU SID base from the first entry of iommu-map */ 1010 smmu_sid_base = map[0].smmu_sid; 1011 1012 /* Look for an available entry to hold the mapping */ 1013 for (i = 0; i < nr_map; i++) { 1014 __be16 bdf_be = cpu_to_be16(map[i].bdf); 1015 u32 val; 1016 u8 hash; 1017 1018 hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0); 1019 1020 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1021 1022 /* If the register is already populated, look for next available entry */ 1023 while (val) { 1024 u8 current_hash = hash++; 1025 u8 next_mask = 0xff; 1026 1027 /* If NEXT field is NULL then update it with next hash */ 1028 if (!(val & next_mask)) { 1029 val |= (u32)hash; 1030 writel(val, bdf_to_sid_base + current_hash * sizeof(u32)); 1031 } 1032 1033 val = readl(bdf_to_sid_base + hash * sizeof(u32)); 1034 } 1035 1036 /* BDF 
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "axi_bridge";
	res->clks[4].id = "rchng";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
	       pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
	       GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
	       pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
	pcie->cfg->ops->deinit(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init	= qcom_pcie_host_init,
	.host_deinit	= qcom_pcie_host_deinit,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 offset, status, bw;
	int speed, width;
	int ret;

	if (!pcie->icc_mem)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	switch (speed) {
	case 1:
		bw = MBps_to_icc(250);
		break;
	case 2:
		bw = MBps_to_icc(500);
		break;
	default:
		WARN_ON_ONCE(1);
		fallthrough;
	case 3:
		bw = MBps_to_icc(985);
		break;
	}

	ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
	}
}

static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}

static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	struct device *dev = &pdev->dev;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = qcom_pcie_icc_init(pcie);
	if (ret)
		goto err_pm_runtime_put;

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	qcom_pcie_icc_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	/*
	 * Set minimum bandwidth required to keep data path functional during
	 * suspend.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
		return ret;
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger an access violation at the end of the suspend cycle,
	 * as the kernel tries to access the PCIe devices' config space for
	 * masking MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state as that
	 * implies VDD supply will be removed and the devices may go into
	 * powerdown state. This will affect the lifetime of storage devices
	 * like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	return 0;
}

static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_update(pcie);

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);