// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm PCIe root complex driver
 *
 * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
 * Copyright 2015 Linaro Limited.
 *
 * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
 */

#include <linux/clk.h>
#include <linux/crc8.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PARF registers */
#define PARF_SYS_CTRL				0x00
#define PARF_PM_CTRL				0x20
#define PARF_PCS_DEEMPH				0x34
#define PARF_PCS_SWING				0x38
#define PARF_PHY_CTRL				0x40
#define PARF_PHY_REFCLK				0x4c
#define PARF_CONFIG_BITS			0x50
#define PARF_DBI_BASE_ADDR			0x168
#define PARF_MHI_CLOCK_RESET_CTRL		0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
#define PARF_Q2A_FLUSH				0x1ac
#define PARF_LTSSM				0x1b0
#define PARF_SID_OFFSET				0x234
#define PARF_BDF_TRANSLATE_CFG			0x24c
#define PARF_SLV_ADDR_SPACE_SIZE		0x358
#define PARF_DEVICE_TYPE			0x1000
#define PARF_BDF_TO_SID_TABLE_N			0x2000

/* ELBI registers */
#define ELBI_SYS_CTRL				0x04

/* DBI registers */
#define AXI_MSTR_RESP_COMP_CTRL0		0x818
#define AXI_MSTR_RESP_COMP_CTRL1		0x81c

/* MHI registers */
#define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
#define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88

/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
#define MST_WAKEUP_EN				BIT(13)
#define SLV_WAKEUP_EN				BIT(12)
#define MSTR_ACLK_CGC_DIS			BIT(10)
#define SLV_ACLK_CGC_DIS			BIT(9)
#define CORE_CLK_CGC_DIS			BIT(6)
#define AUX_PWR_DET				BIT(4)
#define L23_CLK_RMV_DIS				BIT(2)
#define L1_CLK_RMV_DIS				BIT(1)

/* PARF_PM_CTRL register fields */
#define REQ_NOT_ENTR_L1				BIT(5)

/* PARF_PCS_DEEMPH register fields */
#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)

/* PARF_PCS_SWING register fields */
#define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
#define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)

/* PARF_PHY_CTRL register fields */
#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
#define PHY_TEST_PWR_DOWN			BIT(0)

/* PARF_PHY_REFCLK register fields */
#define PHY_REFCLK_SSP_EN			BIT(16)
#define PHY_REFCLK_USE_PAD			BIT(12)

/* PARF_CONFIG_BITS register fields */
#define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)

/* PARF_SLV_ADDR_SPACE_SIZE register value */
#define SLV_ADDR_SPACE_SZ			0x10000000

/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN				BIT(0)
#define MSTR_AXI_CLK_EN				BIT(1)
#define BYPASS					BIT(4)

/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
#define EN					BIT(31)

/* PARF_LTSSM register fields */
#define LTSSM_EN				BIT(8)

/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC				0x4

/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)

/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5

/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
#define CFG_BRIDGE_SB_INIT			BIT(0)

/* PCI_EXP_SLTCAP register fields */
#define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
#define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
						 PCI_EXP_SLTCAP_PCP | \
						 PCI_EXP_SLTCAP_MRLSP | \
						 PCI_EXP_SLTCAP_AIP | \
						 PCI_EXP_SLTCAP_PIP | \
						 PCI_EXP_SLTCAP_HPS | \
						 PCI_EXP_SLTCAP_EIP | \
						 PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
						 PCIE_CAP_SLOT_POWER_LIMIT_SCALE)

#define PERST_DELAY_US				1000

#define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))

#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
	Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))

#define QCOM_PCIE_1_0_0_MAX_CLOCKS		4
struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
	struct reset_control *core;
	struct regulator *vdda;
};

#define QCOM_PCIE_2_1_0_MAX_CLOCKS		5
#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_2_MAX_CLOCKS		4
#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};

#define QCOM_PCIE_2_3_3_MAX_CLOCKS		5
#define QCOM_PCIE_2_3_3_MAX_RESETS		7
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};

#define QCOM_PCIE_2_4_0_MAX_CLOCKS		4
#define QCOM_PCIE_2_4_0_MAX_RESETS		12
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;
};

#define QCOM_PCIE_2_7_0_MAX_CLOCKS		15
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
	struct reset_control *rst;
};

#define QCOM_PCIE_2_9_0_MAX_CLOCKS		5
struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
	struct reset_control *rst;
};
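/*
 * Only one of the per-IP-revision resource sets above is used by a given
 * controller instance, so they share storage in the union below.
 */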
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};

struct qcom_pcie;

struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*host_post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};

struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
};

struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *elbi;			/* DT elbi */
	void __iomem *mhi;
	union qcom_pcie_resources res;
	struct phy *phy;
	struct gpio_desc *reset;
	struct icc_path *icc_mem;
	const struct qcom_pcie_cfg *cfg;
	struct dentry *debugfs;
	bool suspended;
};

#define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)

static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value_cansleep(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST has been asserted for at least 100 ms */
	msleep(100);
	gpiod_set_value_cansleep(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}

static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}

static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val &= ~PCI_EXP_SLTCAP_HPC;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}

static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + ELBI_SYS_CTRL);
}

static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "core";
	res->clks[2].id = "phy";
	res->clks[3].id = "aux";
	res->clks[4].id = "ref";

	/* iface, core, phy are required */
	ret = devm_clk_bulk_get(dev, 3, res->clks);
	if (ret < 0)
		return ret;

	/* aux, ref are optional */
	ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
	if (ret < 0)
		return ret;

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";

	/* ext is optional on APQ8064 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* reset the PCIe interface as uboot can leave it in an undefined state */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	struct device_node *node = dev->of_node;
	u32 val;
	int ret;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret)
		return ret;

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
		       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
		       pcie->parf + PARF_PCS_DEEMPH);
		writel(PCS_SWING_TX_SWING_FULL(120) |
		       PCS_SWING_TX_SWING_LOW(120),
		       pcie->parf + PARF_PCS_SWING);
		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
	}

	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
		/* set TX termination offset */
		val = readl(pcie->parf + PARF_PHY_CTRL);
		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
		writel(val, pcie->parf + PARF_PHY_CTRL);
	}

	/* enable external reference clock */
	val = readl(pcie->parf + PARF_PHY_REFCLK);
	/* USE_PAD is required only for ipq806x */
	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
		val &= ~PHY_REFCLK_USE_PAD;
	val |= PHY_REFCLK_SSP_EN;
	writel(val, pcie->parf + PARF_PHY_REFCLK);

	/* wait for clock acquisition */
	usleep_range(1000, 1500);

	/* Set the Max TLP size to 2K, instead of using default of 4K */
	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
	writel(CFG_BRIDGE_SB_INIT,
	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->clks[0].id = "iface";
	res->clks[1].id = "aux";
	res->clks[2].id = "master_bus";
	res->clks[3].id = "slave_bus";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}

static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_disable(res->vdda);
}

static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}

static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PARF_LTSSM);
	val |= LTSSM_EN;
	writel(val, pcie->parf + PARF_LTSSM);
}

static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "cfg";
	res->clks[2].id = "bus_master";
	res->clks[3].id = "bus_slave";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}

static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->clks[0].id = "aux";
	res->clks[1].id = "master_bus";
	res->clks[2].id = "slave_bus";
	res->clks[3].id = "iface";

	/* qcom,pcie-ipq4019 is defined without "iface" */
	res->num_clks = is_ipq ? 3 : 4;

	ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";

	res->num_resets = is_ipq ? 12 : 6;
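	/* The last six resets (pipe, axi_m_vmid, axi_s_xpu, parf, phy, phy_ahb) are used only on IPQ4019 */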

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}

static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}

static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "ahb";
	res->clks[4].id = "aux";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}

static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}

static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}

static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	unsigned int num_clks, num_opt_clks;
	unsigned int idx;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	idx = 0;
	res->clks[idx++].id = "aux";
	res->clks[idx++].id = "cfg";
	res->clks[idx++].id = "bus_master";
	res->clks[idx++].id = "bus_slave";
	res->clks[idx++].id = "slave_q2a";

	num_clks = idx;

	ret = devm_clk_bulk_get(dev, num_clks, res->clks);
	if (ret < 0)
		return ret;

	res->clks[idx++].id = "tbu";
	res->clks[idx++].id = "ddrss_sf_tbu";
	res->clks[idx++].id = "aggre0";
	res->clks[idx++].id = "aggre1";
	res->clks[idx++].id = "noc_aggr";
	res->clks[idx++].id = "noc_aggr_4";
	res->clks[idx++].id = "noc_aggr_south_sf";
	res->clks[idx++].id = "cnoc_qx";
	res->clks[idx++].id = "sleep";
	res->clks[idx++].id = "cnoc_sf_axi";

	num_opt_clks = idx - num_clks;
	res->num_clks = idx;

	ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
	if (ret < 0)
		return ret;

	return 0;
}

static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	/* MAC PHY_POWERDOWN MUX DISABLE */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}

static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	qcom_pcie_clear_hpc(pcie->pci);

	return 0;
}

static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	/*
	 * Downstream devices need to be in D0 state before enabling PCI PM
	 * substates.
	 */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);

	return 0;
}

static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
{
	struct dw_pcie_rp *pp = &pcie->pci->pp;

	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
}

static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}

static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;

	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zeroed out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}

static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->clks[0].id = "iface";
	res->clks[1].id = "axi_m";
	res->clks[2].id = "axi_s";
	res->clks[3].id = "axi_bridge";
	res->clks[4].id = "rchng";

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
	if (ret < 0)
		return ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	return 0;
}

static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;

	clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
	struct device *dev = pcie->pci->dev;
	int ret;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		return ret;
	}

	/*
	 * Delay periods before and after reset deassert are working values
	 * from downstream Codeaurora kernel
	 */
	usleep_range(2000, 2500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		return ret;
	}

	usleep_range(2000, 2500);

	return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
}

static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	writel(SLV_ADDR_SPACE_SZ,
	       pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	writel(0, pcie->parf + PARF_DBI_BASE_ADDR);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
	       pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
	       GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
	       pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
	       SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
	       AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
	       pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
	       PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}

static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	qcom_ep_reset_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
	if (ret)
		goto err_deinit;

	ret = phy_power_on(pcie->phy);
	if (ret)
		goto err_deinit;

	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_disable_phy;
	}

	qcom_ep_reset_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_ep_reset_assert(pcie);
err_disable_phy:
	phy_power_off(pcie->phy);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}

static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_ep_reset_assert(pcie);
	phy_power_off(pcie->phy);
	pcie->cfg->ops->deinit(pcie);
}

static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	if (pcie->cfg->ops->host_post_init)
		pcie->cfg->ops->host_post_init(pcie);
}

static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init		= qcom_pcie_host_init,
	.deinit		= qcom_pcie_host_deinit,
	.post_init	= qcom_pcie_host_post_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 2.9.0	Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};

static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	int ret;

	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
	if (IS_ERR(pcie->icc_mem))
		return PTR_ERR(pcie->icc_mem);

	/*
	 * Some Qualcomm platforms require interconnect bandwidth constraints
	 * to be set before enabling interconnect clocks.
	 *
	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
	 * for the pcie-mem path.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
		return ret;
	}

	return 0;
}

static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 offset, status;
	int speed, width;
	int ret;

	if (!pcie->icc_mem)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
	if (ret) {
		dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
			ret);
	}
}

static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
{
	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);

	seq_printf(s, "L0s transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));

	seq_printf(s, "L1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));

	seq_printf(s, "L1.1 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));

	seq_printf(s, "L1.2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));

	seq_printf(s, "L2 transition count: %u\n",
		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));

	return 0;
}

static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);
	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
				    qcom_pcie_link_transition_count);
}

static int qcom_pcie_probe(struct platform_device *pdev)
{
	const struct qcom_pcie_cfg *pcie_cfg;
	struct device *dev = &pdev->dev;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg || !pcie_cfg->ops) {
		dev_err(dev, "Invalid platform data\n");
		return -EINVAL;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->reset)) {
		ret = PTR_ERR(pcie->reset);
		goto err_pm_runtime_put;
	}

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
	if (IS_ERR(pcie->elbi)) {
		ret = PTR_ERR(pcie->elbi);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	pcie->phy = devm_phy_optional_get(dev, "pciephy");
	if (IS_ERR(pcie->phy)) {
		ret = PTR_ERR(pcie->phy);
		goto err_pm_runtime_put;
	}

	ret = qcom_pcie_icc_init(pcie);
	if (ret)
		goto err_pm_runtime_put;

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = phy_init(pcie->phy);
	if (ret)
		goto err_pm_runtime_put;

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "cannot initialize host\n");
		goto err_phy_exit;
	}

	qcom_pcie_icc_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_phy_exit:
	phy_exit(pcie->phy);
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}

static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	/*
	 * Set minimum bandwidth required to keep data path functional during
	 * suspend.
	 */
	ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
	if (ret) {
		dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
		return ret;
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger an access violation at the end of the suspend cycle, as
	 * the kernel tries to access the config space of PCIe devices to mask
	 * MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state as that
	 * implies VDD supply will be removed and the devices may go into
	 * powerdown state. This will affect the lifetime of storage devices
	 * like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	return 0;
}

static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_update(pcie);

	return 0;
}

static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ }
};

static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);

static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};

static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
builtin_platform_driver(qcom_pcie_driver);